-rw-r--r--  MAINTAINERS  2
-rw-r--r--  Makefile  9
-rw-r--r--  Makefile.objs  1
-rw-r--r--  accel/Makefile.objs  1
-rw-r--r--  accel/kvm/kvm-all.c  95
-rw-r--r--  accel/kvm/trace-events  1
-rw-r--r--  accel/stubs/Makefile.objs  1
-rw-r--r--  accel/stubs/xen-stub.c (renamed from stubs/xen-hvm.c)  23
-rw-r--r--  accel/xen/Makefile.objs  1
-rw-r--r--  accel/xen/xen-all.c (renamed from hw/xen/xen-common.c)  12
-rw-r--r--  chardev/char-socket.c  4
-rwxr-xr-x  configure  25
-rw-r--r--  cpus.c  26
-rw-r--r--  disas.c  13
-rw-r--r--  exec.c  29
-rw-r--r--  hmp-commands-info.hx  7
-rw-r--r--  hw/Makefile.objs  2
-rw-r--r--  hw/acpi/piix4.c  2
-rw-r--r--  hw/block/vhost-user-blk.c  1
-rw-r--r--  hw/core/machine.c  4
-rw-r--r--  hw/core/numa.c  5
-rw-r--r--  hw/hyperv/Kconfig  5
-rw-r--r--  hw/hyperv/Makefile.objs  1
-rw-r--r--  hw/hyperv/hyperv.c  8
-rw-r--r--  hw/hyperv/trace-events  18
-rw-r--r--  hw/hyperv/vmbus.c  2778
-rw-r--r--  hw/i386/acpi-build.c  43
-rw-r--r--  hw/i386/amd_iommu.c  19
-rw-r--r--  hw/i386/pc.c  3
-rw-r--r--  hw/i386/pc_piix.c  5
-rw-r--r--  hw/i386/pc_q35.c  3
-rw-r--r--  hw/i386/vmmouse.c  20
-rw-r--r--  hw/i386/vmport.c  183
-rw-r--r--  hw/i386/vmport.h  34
-rw-r--r--  hw/i386/xen/xen-hvm.c  1
-rw-r--r--  hw/i386/xen/xen_platform.c  1
-rw-r--r--  hw/intc/ioapic.c  19
-rw-r--r--  hw/isa/piix3.c  1
-rw-r--r--  hw/pci-host/pam.c  1
-rw-r--r--  hw/pci/msix.c  1
-rw-r--r--  hw/scsi/megasas.c  44
-rw-r--r--  hw/scsi/vhost-user-scsi.c  1
-rw-r--r--  hw/usb/hcd-musb.c  5
-rw-r--r--  hw/usb/tusb6010.c  1
-rw-r--r--  hw/vfio/pci.c  37
-rw-r--r--  hw/xen/Makefile.objs  3
-rw-r--r--  hw/xen/xen_pt.c  12
-rw-r--r--  hw/xen/xen_pt.h  6
-rw-r--r--  hw/xen/xen_pt_stub.c  22
-rw-r--r--  include/exec/cpu-all.h  1
-rw-r--r--  include/exec/cpu-common.h  3
-rw-r--r--  include/exec/memory.h  30
-rw-r--r--  include/exec/ram_addr.h  2
-rw-r--r--  include/hw/display/edid.h  3
-rw-r--r--  include/hw/elf_ops.h  11
-rw-r--r--  include/hw/hyperv/hyperv.h  1
-rw-r--r--  include/hw/hyperv/vmbus-bridge.h  35
-rw-r--r--  include/hw/hyperv/vmbus-proto.h  222
-rw-r--r--  include/hw/hyperv/vmbus.h  230
-rw-r--r--  include/hw/i386/vmport.h  28
-rw-r--r--  include/hw/usb.h  30
-rw-r--r--  include/hw/usb/hcd-musb.h  47
-rw-r--r--  include/hw/xen/xen.h  11
-rw-r--r--  include/io/task.h  2
-rw-r--r--  include/qemu/thread.h  2
-rw-r--r--  include/qemu/typedefs.h  2
-rw-r--r--  include/qom/object.h  2
-rw-r--r--  include/qom/qom-qobject.h  2
-rw-r--r--  include/sysemu/accel.h  2
-rw-r--r--  include/sysemu/hvf.h  72
-rw-r--r--  include/sysemu/kvm.h  4
-rw-r--r--  include/sysemu/sysemu.h  1
-rw-r--r--  include/sysemu/tcg.h  2
-rw-r--r--  include/sysemu/xen.h  38
-rw-r--r--  io/task.c  1
-rw-r--r--  memory.c  75
-rw-r--r--  migration/savevm.c  2
-rw-r--r--  monitor/misc.c  3
-rw-r--r--  qom/container.c  1
-rw-r--r--  qom/object.c  39
-rw-r--r--  replay/replay-internal.c  15
-rw-r--r--  replay/replay.c  5
-rwxr-xr-x  scripts/checkpatch.pl  2
-rw-r--r--  scripts/coverity-scan/coverity-scan.docker  3
-rwxr-xr-x  scripts/coverity-scan/run-coverity-scan  139
-rw-r--r--  softmmu/vl.c  7
-rw-r--r--  stubs/Makefile.objs  2
-rw-r--r--  stubs/qmp_memory_device.c  1
-rw-r--r--  stubs/xen-common.c  13
-rw-r--r--  target/i386/TODO  31
-rw-r--r--  target/i386/cpu.c  42
-rw-r--r--  target/i386/cpu.h  15
-rw-r--r--  target/i386/fpu_helper.c  258
-rw-r--r--  target/i386/hax-all.c  25
-rw-r--r--  target/i386/hax-i386.h  7
-rw-r--r--  target/i386/hvf/hvf-i386.h  37
-rw-r--r--  target/i386/hvf/hvf.c  30
-rw-r--r--  target/i386/hvf/x86.c  2
-rw-r--r--  target/i386/hvf/x86.h  89
-rw-r--r--  target/i386/hvf/x86_decode.c  25
-rw-r--r--  target/i386/hvf/x86_emu.c  122
-rw-r--r--  target/i386/hvf/x86_flags.c  81
-rw-r--r--  target/i386/hvf/x86_task.c  10
-rw-r--r--  target/i386/hvf/x86hvf.c  6
-rw-r--r--  target/i386/kvm.c  34
-rw-r--r--  target/i386/misc_helper.c  2
-rw-r--r--  target/i386/ops_sse.h  57
-rw-r--r--  target/i386/sev.c  257
-rw-r--r--  target/i386/sev_i386.h  49
-rw-r--r--  target/ppc/cpu.h  4
-rw-r--r--  target/ppc/kvm_ppc.h  22
-rw-r--r--  target/ppc/translate_init.inc.c  4
-rw-r--r--  tests/docker/Makefile.include  2
-rwxr-xr-x  tests/docker/docker.py  14
-rw-r--r--  tests/qtest/machine-none-test.c  10
-rw-r--r--  tests/tcg/i386/Makefile.target  3
-rw-r--r--  tests/tcg/i386/test-i386-fbstp.c  140
-rw-r--r--  tests/tcg/i386/test-i386-fisttp.c  100
-rw-r--r--  tests/tcg/i386/test-i386-fldcst.c  199
-rw-r--r--  tests/tcg/i386/test-i386-fp-exceptions.c  831
-rw-r--r--  tests/tcg/i386/test-i386-fscale.c  108
-rw-r--r--  tests/tcg/i386/test-i386-fxam.c  143
-rw-r--r--  tests/tcg/i386/test-i386-fxtract.c  120
-rw-r--r--  tests/tcg/i386/test-i386-pcmpistri.c  33
-rw-r--r--  tests/test-io-task.c  1
-rw-r--r--  util/oslib-posix.c  9
126 files changed, 6572 insertions, 980 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 13396310d9..a922775e45 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -440,6 +440,7 @@ M: Paul Durrant <paul@xen.org>
L: xen-devel@lists.xenproject.org
S: Supported
F: */xen*
+F: accel/xen/*
F: hw/9pfs/xen-9p*
F: hw/char/xen_console.c
F: hw/display/xenfb.c
@@ -453,6 +454,7 @@ F: hw/i386/xen/
F: hw/pci-host/xen_igd_pt.c
F: include/hw/block/dataplane/xen*
F: include/hw/xen/
+F: include/sysemu/xen.h
F: include/sysemu/xen-mapcache.h
Guest CPU Cores (HAXM)
diff --git a/Makefile b/Makefile
index d1af126ea1..ed0ed93b2d 100644
--- a/Makefile
+++ b/Makefile
@@ -336,9 +336,9 @@ $(call set-vpath, $(SRC_PATH))
LIBS+=-lz $(LIBS_TOOLS)
vhost-user-json-y =
-HELPERS-y =
+HELPERS-y = $(HELPERS)
-HELPERS-$(call land,$(CONFIG_SOFTMMU),$(CONFIG_LINUX)) = qemu-bridge-helper$(EXESUF)
+HELPERS-$(call land,$(CONFIG_SOFTMMU),$(CONFIG_LINUX)) += qemu-bridge-helper$(EXESUF)
ifeq ($(CONFIG_LINUX)$(CONFIG_VIRGL)$(CONFIG_GBM)$(CONFIG_TOOLS),yyyy)
HELPERS-y += vhost-user-gpu$(EXESUF)
@@ -1258,6 +1258,11 @@ endif
$(call print-help-run,$(t)/fuzz,Build fuzzer for $(t)); \
))) \
echo '')
+ @$(if $(HELPERS-y), \
+ echo 'Helper targets:'; \
+ $(foreach t, $(HELPERS-y), \
+ $(call print-help-run,$(t),Build $(shell basename $(t)));) \
+ echo '')
@$(if $(TOOLS), \
echo 'Tools targets:'; \
$(foreach t, $(TOOLS), \
diff --git a/Makefile.objs b/Makefile.objs
index 99774cfd25..c09d95dfe3 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -150,6 +150,7 @@ trace-events-subdirs += hw/block/dataplane
trace-events-subdirs += hw/char
trace-events-subdirs += hw/dma
trace-events-subdirs += hw/hppa
+trace-events-subdirs += hw/hyperv
trace-events-subdirs += hw/i2c
trace-events-subdirs += hw/i386
trace-events-subdirs += hw/i386/xen
diff --git a/accel/Makefile.objs b/accel/Makefile.objs
index 17e5ac6061..ff72f0d030 100644
--- a/accel/Makefile.objs
+++ b/accel/Makefile.objs
@@ -2,4 +2,5 @@ common-obj-$(CONFIG_SOFTMMU) += accel.o
obj-$(call land,$(CONFIG_SOFTMMU),$(CONFIG_POSIX)) += qtest.o
obj-$(CONFIG_KVM) += kvm/
obj-$(CONFIG_TCG) += tcg/
+obj-$(CONFIG_XEN) += xen/
obj-y += stubs/
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index d06cc04079..f24d7da783 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -160,9 +160,59 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = {
static NotifierList kvm_irqchip_change_notifiers =
NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);
+struct KVMResampleFd {
+ int gsi;
+ EventNotifier *resample_event;
+ QLIST_ENTRY(KVMResampleFd) node;
+};
+typedef struct KVMResampleFd KVMResampleFd;
+
+/*
+ * Only used with split irqchip where we need to do the resample fd
+ * kick for the kernel from userspace.
+ */
+static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
+ QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);
+
#define kvm_slots_lock(kml) qemu_mutex_lock(&(kml)->slots_lock)
#define kvm_slots_unlock(kml) qemu_mutex_unlock(&(kml)->slots_lock)
+static inline void kvm_resample_fd_remove(int gsi)
+{
+ KVMResampleFd *rfd;
+
+ QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
+ if (rfd->gsi == gsi) {
+ QLIST_REMOVE(rfd, node);
+ g_free(rfd);
+ break;
+ }
+ }
+}
+
+static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
+{
+ KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);
+
+ rfd->gsi = gsi;
+ rfd->resample_event = event;
+
+ QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
+}
+
+void kvm_resample_fd_notify(int gsi)
+{
+ KVMResampleFd *rfd;
+
+ QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
+ if (rfd->gsi == gsi) {
+ event_notifier_set(rfd->resample_event);
+ trace_kvm_resample_fd_notify(gsi);
+ return;
+ }
+ }
+}
+
int kvm_get_max_memslots(void)
{
KVMState *s = KVM_STATE(current_accel());
@@ -1662,9 +1712,13 @@ int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
return kvm_update_routing_entry(s, &kroute);
}
-static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
+static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
+ EventNotifier *resample, int virq,
bool assign)
{
+ int fd = event_notifier_get_fd(event);
+ int rfd = resample ? event_notifier_get_fd(resample) : -1;
+
struct kvm_irqfd irqfd = {
.fd = fd,
.gsi = virq,
@@ -1672,8 +1726,33 @@ static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
};
if (rfd != -1) {
- irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
- irqfd.resamplefd = rfd;
+ assert(assign);
+ if (kvm_irqchip_is_split()) {
+ /*
+ * When the slow irqchip (e.g. IOAPIC) is in the
+ * userspace, KVM kernel resamplefd will not work because
+ * the EOI of the interrupt will be delivered to userspace
+ * instead, so the KVM kernel resamplefd kick will be
+ * skipped. The userspace here mimics what the kernel
+ * provides with resamplefd, remember the resamplefd and
+ * kick it when we receive EOI of this IRQ.
+ *
+ * This is hackery because IOAPIC is mostly bypassed
+ * (except EOI broadcasts) when irqfd is used. However
+ * this can bring much performance back for split irqchip
+ * with INTx IRQs (for VFIO, this gives 93% perf of the
+ * full fast path, which is a 46% perf boost compared to
+ * the INTx slow path).
+ */
+ kvm_resample_fd_insert(virq, resample);
+ } else {
+ irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
+ irqfd.resamplefd = rfd;
+ }
+ } else if (!assign) {
+ if (kvm_irqchip_is_split()) {
+ kvm_resample_fd_remove(virq);
+ }
}
if (!kvm_irqfds_enabled()) {
@@ -1769,7 +1848,9 @@ int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
return -ENOSYS;
}
-static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
+static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
+ EventNotifier *resample, int virq,
+ bool assign)
{
abort();
}
@@ -1783,15 +1864,13 @@ int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
EventNotifier *rn, int virq)
{
- return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
- rn ? event_notifier_get_fd(rn) : -1, virq, true);
+ return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
}
int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
int virq)
{
- return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
- false);
+ return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
}
int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
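Editorial note on the hunks above: with a split irqchip, the EOI-time kick that the kernel normally performs through resamplefd has to come from the userspace interrupt controller instead. A minimal sketch of that consumer side, assuming a hypothetical IOAPIC-style EOI handler; the function name and body are illustrative and not the actual hw/intc/ioapic.c change in this series:

/* On EOI of a level-triggered GSI in the userspace IOAPIC, kick the
 * registered resample fd, if any, so the kernel irqfd can be re-armed. */
static void example_ioapic_eoi(int gsi)
{
    /* device-model-specific de-assertion of the IRQ line goes here */
    kvm_resample_fd_notify(gsi);
}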
diff --git a/accel/kvm/trace-events b/accel/kvm/trace-events
index 4fb6e59d19..a68eb66534 100644
--- a/accel/kvm/trace-events
+++ b/accel/kvm/trace-events
@@ -16,4 +16,5 @@ kvm_set_ioeventfd_mmio(int fd, uint64_t addr, uint32_t val, bool assign, uint32_
kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%x val=0x%x assign: %d size: %d match: %d"
kvm_set_user_memory(uint32_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, int ret) "Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " ret=%d"
kvm_clear_dirty_log(uint32_t slot, uint64_t start, uint32_t size) "slot#%"PRId32" start 0x%"PRIx64" size 0x%"PRIx32
+kvm_resample_fd_notify(int gsi) "gsi %d"
diff --git a/accel/stubs/Makefile.objs b/accel/stubs/Makefile.objs
index 3894caf95d..bbd14e71fb 100644
--- a/accel/stubs/Makefile.objs
+++ b/accel/stubs/Makefile.objs
@@ -3,3 +3,4 @@ obj-$(call lnot,$(CONFIG_HVF)) += hvf-stub.o
obj-$(call lnot,$(CONFIG_WHPX)) += whpx-stub.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
obj-$(call lnot,$(CONFIG_TCG)) += tcg-stub.o
+obj-$(call lnot,$(CONFIG_XEN)) += xen-stub.o
diff --git a/stubs/xen-hvm.c b/accel/stubs/xen-stub.c
index b7d53b5e2f..dcca4e678a 100644
--- a/stubs/xen-hvm.c
+++ b/accel/stubs/xen-stub.c
@@ -1,18 +1,18 @@
/*
- * Copyright (C) 2010 Citrix Ltd.
+ * Copyright (C) 2014 Citrix Systems UK Ltd.
*
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "hw/xen/xen.h"
-#include "exec/memory.h"
#include "qapi/qapi-commands-misc.h"
+void xenstore_store_pv_console_info(int i, Chardev *chr)
+{
+}
+
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
return -1;
@@ -35,11 +35,6 @@ int xen_is_pirq_msi(uint32_t msi_data)
return 0;
}
-void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
- Error **errp)
-{
-}
-
qemu_irq *xen_interrupt_controller_init(void)
{
return NULL;
@@ -49,10 +44,6 @@ void xen_register_framebuffer(MemoryRegion *mr)
{
}
-void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
-{
-}
-
void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
{
}
diff --git a/accel/xen/Makefile.objs b/accel/xen/Makefile.objs
new file mode 100644
index 0000000000..7482cfb436
--- /dev/null
+++ b/accel/xen/Makefile.objs
@@ -0,0 +1 @@
+obj-y += xen-all.o
diff --git a/hw/xen/xen-common.c b/accel/xen/xen-all.c
index 70564cc952..0c24d4b191 100644
--- a/hw/xen/xen-common.c
+++ b/accel/xen/xen-all.c
@@ -16,6 +16,7 @@
#include "hw/xen/xen_pt.h"
#include "chardev/char.h"
#include "sysemu/accel.h"
+#include "sysemu/xen.h"
#include "sysemu/runstate.h"
#include "migration/misc.h"
#include "migration/global_state.h"
@@ -31,6 +32,13 @@
do { } while (0)
#endif
+static bool xen_allowed;
+
+bool xen_enabled(void)
+{
+ return xen_allowed;
+}
+
xc_interface *xen_xc;
xenforeignmemory_handle *xen_fmem;
xendevicemodel_handle *xen_dmod;
@@ -129,12 +137,12 @@ static void xen_change_state_handler(void *opaque, int running,
static bool xen_get_igd_gfx_passthru(Object *obj, Error **errp)
{
- return has_igd_gfx_passthru;
+ return xen_igd_gfx_pt_enabled();
}
static void xen_set_igd_gfx_passthru(Object *obj, bool value, Error **errp)
{
- has_igd_gfx_passthru = value;
+ xen_igd_gfx_pt_set(value, errp);
}
static void xen_setup_post(MachineState *ms, AccelState *accel)
diff --git a/chardev/char-socket.c b/chardev/char-socket.c
index 18e762643b..afebeec5c3 100644
--- a/chardev/char-socket.c
+++ b/chardev/char-socket.c
@@ -551,7 +551,9 @@ static int tcp_chr_sync_read(Chardev *chr, const uint8_t *buf, int len)
qio_channel_set_blocking(s->ioc, true, NULL);
size = tcp_chr_recv(chr, (void *) buf, len);
- qio_channel_set_blocking(s->ioc, false, NULL);
+ if (s->state != TCP_CHARDEV_STATE_DISCONNECTED) {
+ qio_channel_set_blocking(s->ioc, false, NULL);
+ }
if (size == 0) {
/* connection closed */
tcp_chr_disconnect(chr);
diff --git a/configure b/configure
index 7c2adf36e5..bb7fd12612 100755
--- a/configure
+++ b/configure
@@ -4587,7 +4587,13 @@ fi
if test "$tcmalloc" = "yes" ; then
cat > $TMPC << EOF
#include <stdlib.h>
-int main(void) { malloc(1); return 0; }
+int main(void) {
+ void *tmp = malloc(1);
+ if (tmp != NULL) {
+ return 0;
+ }
+ return 1;
+}
EOF
if compile_prog "" "-ltcmalloc" ; then
@@ -4603,7 +4609,13 @@ fi
if test "$jemalloc" = "yes" ; then
cat > $TMPC << EOF
#include <stdlib.h>
-int main(void) { malloc(1); return 0; }
+int main(void) {
+ void *tmp = malloc(1);
+ if (tmp != NULL) {
+ return 0;
+ }
+ return 1;
+}
EOF
if compile_prog "" "-ljemalloc" ; then
@@ -6164,7 +6176,9 @@ if test "$sanitizers" = "yes" ; then
#include <stdlib.h>
int main(void) {
void *tmp = malloc(10);
- return *(int *)(tmp + 2);
+ if (tmp != NULL) {
+ return *(int *)(tmp + 2);
+ }
}
EOF
if compile_prog "$CPU_CFLAGS -Werror -fsanitize=undefined" ""; then
@@ -6394,7 +6408,7 @@ if test "$softmmu" = yes ; then
if test "$linux" = yes; then
if test "$virtfs" != no && test "$cap_ng" = yes && test "$attr" = yes ; then
virtfs=yes
- tools="$tools fsdev/virtfs-proxy-helper\$(EXESUF)"
+ helpers="$helpers fsdev/virtfs-proxy-helper\$(EXESUF)"
else
if test "$virtfs" = yes; then
error_exit "VirtFS requires libcap-ng devel and libattr devel"
@@ -6409,7 +6423,7 @@ if test "$softmmu" = yes ; then
fi
mpath=no
fi
- tools="$tools scsi/qemu-pr-helper\$(EXESUF)"
+ helpers="$helpers scsi/qemu-pr-helper\$(EXESUF)"
else
if test "$virtfs" = yes; then
error_exit "VirtFS is supported only on Linux"
@@ -7654,6 +7668,7 @@ else
QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg/\$(ARCH) $QEMU_INCLUDES"
fi
+echo "HELPERS=$helpers" >> $config_host_mak
echo "TOOLS=$tools" >> $config_host_mak
echo "ROMS=$roms" >> $config_host_mak
echo "MAKE=$make" >> $config_host_mak
diff --git a/cpus.c b/cpus.c
index 5670c96bcf..34fc203808 100644
--- a/cpus.c
+++ b/cpus.c
@@ -379,7 +379,8 @@ static void icount_adjust(void)
seqlock_write_lock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
- cur_time = cpu_get_clock_locked();
+ cur_time = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
+ cpu_get_clock_locked());
cur_icount = cpu_get_icount_locked();
delta = cur_icount - cur_time;
@@ -647,6 +648,11 @@ static bool adjust_timers_state_needed(void *opaque)
return s->icount_rt_timer != NULL;
}
+static bool shift_state_needed(void *opaque)
+{
+ return use_icount == 2;
+}
+
/*
* Subsection for warp timer migration is optional, because it may not be created
*/
@@ -674,6 +680,17 @@ static const VMStateDescription icount_vmstate_adjust_timers = {
}
};
+static const VMStateDescription icount_vmstate_shift = {
+ .name = "timer/icount/shift",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = shift_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT16(icount_time_shift, TimersState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
/*
* This is a subsection for icount migration.
*/
@@ -690,6 +707,7 @@ static const VMStateDescription icount_vmstate_timers = {
.subsections = (const VMStateDescription*[]) {
&icount_vmstate_warp_timer,
&icount_vmstate_adjust_timers,
+ &icount_vmstate_shift,
NULL
}
};
@@ -803,8 +821,10 @@ void configure_icount(QemuOpts *opts, Error **errp)
bool align = qemu_opt_get_bool(opts, "align", false);
long time_shift = -1;
- if (!option && qemu_opt_get(opts, "align")) {
- error_setg(errp, "Please specify shift option when using align");
+ if (!option) {
+ if (qemu_opt_get(opts, "align") != NULL) {
+ error_setg(errp, "Please specify shift option when using align");
+ }
return;
}
diff --git a/disas.c b/disas.c
index 45285d3f63..c1397d3933 100644
--- a/disas.c
+++ b/disas.c
@@ -39,9 +39,11 @@ target_read_memory (bfd_vma memaddr,
struct disassemble_info *info)
{
CPUDebug *s = container_of(info, CPUDebug, info);
+ int r;
- cpu_memory_rw_debug(s->cpu, memaddr, myaddr, length, 0);
- return 0;
+ r = cpu_memory_rw_debug(s->cpu, memaddr, myaddr, length, 0);
+
+ return r ? EIO : 0;
}
/* Print an error message. We can assume that this is in response to
@@ -718,10 +720,11 @@ physical_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length,
struct disassemble_info *info)
{
CPUDebug *s = container_of(info, CPUDebug, info);
+ MemTxResult res;
- address_space_read(s->cpu->as, memaddr, MEMTXATTRS_UNSPECIFIED,
- myaddr, length);
- return 0;
+ res = address_space_read(s->cpu->as, memaddr, MEMTXATTRS_UNSPECIFIED,
+ myaddr, length);
+ return res == MEMTX_OK ? 0 : EIO;
}
/* Disassembler for the monitor. */
diff --git a/exec.c b/exec.c
index a0bf9d61c8..9c8f558590 100644
--- a/exec.c
+++ b/exec.c
@@ -3546,6 +3546,7 @@ void *address_space_map(AddressSpace *as,
if (!memory_access_is_direct(mr, is_write)) {
if (atomic_xchg(&bounce.in_use, true)) {
+ *plen = 0;
return NULL;
}
/* Avoid unbounded allocations */
@@ -3724,7 +3725,7 @@ static inline MemoryRegion *address_space_translate_cached(
/* Called from RCU critical section. address_space_read_cached uses this
* out of line function when the target is an MMIO or IOMMU region.
*/
-void
+MemTxResult
address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
void *buf, hwaddr len)
{
@@ -3734,15 +3735,15 @@ address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
l = len;
mr = address_space_translate_cached(cache, addr, &addr1, &l, false,
MEMTXATTRS_UNSPECIFIED);
- flatview_read_continue(cache->fv,
- addr, MEMTXATTRS_UNSPECIFIED, buf, len,
- addr1, l, mr);
+ return flatview_read_continue(cache->fv,
+ addr, MEMTXATTRS_UNSPECIFIED, buf, len,
+ addr1, l, mr);
}
/* Called from RCU critical section. address_space_write_cached uses this
* out of line function when the target is an MMIO or IOMMU region.
*/
-void
+MemTxResult
address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
const void *buf, hwaddr len)
{
@@ -3752,9 +3753,9 @@ address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
l = len;
mr = address_space_translate_cached(cache, addr, &addr1, &l, true,
MEMTXATTRS_UNSPECIFIED);
- flatview_write_continue(cache->fv,
- addr, MEMTXATTRS_UNSPECIFIED, buf, len,
- addr1, l, mr);
+ return flatview_write_continue(cache->fv,
+ addr, MEMTXATTRS_UNSPECIFIED, buf, len,
+ addr1, l, mr);
}
#define ARG1_DECL MemoryRegionCache *cache
@@ -3777,6 +3778,7 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
while (len > 0) {
int asidx;
MemTxAttrs attrs;
+ MemTxResult res;
page = addr & TARGET_PAGE_MASK;
phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
@@ -3789,11 +3791,14 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
l = len;
phys_addr += (addr & ~TARGET_PAGE_MASK);
if (is_write) {
- address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
- attrs, buf, l);
+ res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
+ attrs, buf, l);
} else {
- address_space_read(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf,
- l);
+ res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr,
+ attrs, buf, l);
+ }
+ if (res != MEMTX_OK) {
+ return -1;
}
len -= l;
buf += l;
diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx
index ca5198438d..30209e3903 100644
--- a/hmp-commands-info.hx
+++ b/hmp-commands-info.hx
@@ -254,11 +254,12 @@ ERST
{
.name = "mtree",
- .args_type = "flatview:-f,dispatch_tree:-d,owner:-o",
- .params = "[-f][-d][-o]",
+ .args_type = "flatview:-f,dispatch_tree:-d,owner:-o,disabled:-D",
+ .params = "[-f][-d][-o][-D]",
.help = "show memory tree (-f: dump flat view for address spaces;"
"-d: dump dispatch tree, valid with -f only);"
- "-o: dump region owners/parents",
+ "-o: dump region owners/parents;"
+ "-D: dump disabled regions",
.cmd = hmp_info_mtree,
},
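Editorial usage note: after this change, "info mtree -D" in the HMP monitor also dumps disabled memory regions, alongside the existing -f, -d and -o modifiers.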
diff --git a/hw/Makefile.objs b/hw/Makefile.objs
index 660e2b4373..4cbe5e4e57 100644
--- a/hw/Makefile.objs
+++ b/hw/Makefile.objs
@@ -35,7 +35,7 @@ devices-dirs-y += usb/
devices-dirs-$(CONFIG_VFIO) += vfio/
devices-dirs-y += virtio/
devices-dirs-y += watchdog/
-devices-dirs-y += xen/
+devices-dirs-$(CONFIG_XEN) += xen/
devices-dirs-$(CONFIG_MEM_DEVICE) += mem/
devices-dirs-$(CONFIG_NUBUS) += nubus/
devices-dirs-y += semihosting/
diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
index 85c199b30d..e27f57195a 100644
--- a/hw/acpi/piix4.c
+++ b/hw/acpi/piix4.c
@@ -30,6 +30,7 @@
#include "hw/acpi/acpi.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
+#include "sysemu/xen.h"
#include "qapi/error.h"
#include "qemu/range.h"
#include "exec/address-spaces.h"
@@ -41,7 +42,6 @@
#include "hw/mem/nvdimm.h"
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/acpi_dev_interface.h"
-#include "hw/xen/xen.h"
#include "migration/vmstate.h"
#include "hw/core/cpu.h"
#include "trace.h"
diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
index 76838e76d3..a00b854736 100644
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -20,7 +20,6 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
-#include "qom/object.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/vhost.h"
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 9eca7d8c9b..1d80ab0e1d 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -45,6 +45,10 @@ GlobalProperty hw_compat_4_2[] = {
{ "qxl", "revision", "4" },
{ "qxl-vga", "revision", "4" },
{ "fw_cfg", "acpi-mr-restore", "false" },
+ { "vmport", "x-read-set-eax", "off" },
+ { "vmport", "x-signal-unsupported-cmd", "off" },
+ { "vmport", "x-report-vmx-type", "off" },
+ { "vmport", "x-cmds-v2", "off" },
};
const size_t hw_compat_4_2_len = G_N_ELEMENTS(hw_compat_4_2);
diff --git a/hw/core/numa.c b/hw/core/numa.c
index 316bc50d75..5f81900f88 100644
--- a/hw/core/numa.c
+++ b/hw/core/numa.c
@@ -757,6 +757,11 @@ void numa_complete_configuration(MachineState *ms)
}
if (!numa_uses_legacy_mem() && mc->default_ram_id) {
+ if (ms->ram_memdev_id) {
+ error_report("'-machine memory-backend' and '-numa memdev'"
+ " properties are mutually exclusive");
+ exit(1);
+ }
ms->ram = g_new(MemoryRegion, 1);
memory_region_init(ms->ram, OBJECT(ms), mc->default_ram_id,
ram_size);
diff --git a/hw/hyperv/Kconfig b/hw/hyperv/Kconfig
index a1fa8ff9be..3fbfe41c9e 100644
--- a/hw/hyperv/Kconfig
+++ b/hw/hyperv/Kconfig
@@ -6,3 +6,8 @@ config HYPERV_TESTDEV
bool
default y if TEST_DEVICES
depends on HYPERV
+
+config VMBUS
+ bool
+ default y
+ depends on HYPERV
diff --git a/hw/hyperv/Makefile.objs b/hw/hyperv/Makefile.objs
index edaca2f763..5b614e040c 100644
--- a/hw/hyperv/Makefile.objs
+++ b/hw/hyperv/Makefile.objs
@@ -1,2 +1,3 @@
obj-y += hyperv.o
obj-$(CONFIG_HYPERV_TESTDEV) += hyperv_testdev.o
+obj-$(CONFIG_VMBUS) += vmbus.o
diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c
index 4b11f7a76b..a3933c34c6 100644
--- a/hw/hyperv/hyperv.c
+++ b/hw/hyperv/hyperv.c
@@ -38,6 +38,13 @@ typedef struct SynICState {
#define TYPE_SYNIC "hyperv-synic"
#define SYNIC(obj) OBJECT_CHECK(SynICState, (obj), TYPE_SYNIC)
+static bool synic_enabled;
+
+bool hyperv_is_synic_enabled(void)
+{
+ return synic_enabled;
+}
+
static SynICState *get_synic(CPUState *cs)
{
return SYNIC(object_resolve_path_component(OBJECT(cs), "synic"));
@@ -134,6 +141,7 @@ void hyperv_synic_add(CPUState *cs)
object_property_add_child(OBJECT(cs), "synic", obj);
object_unref(obj);
object_property_set_bool(obj, true, "realized", &error_abort);
+ synic_enabled = true;
}
void hyperv_synic_reset(CPUState *cs)
diff --git a/hw/hyperv/trace-events b/hw/hyperv/trace-events
new file mode 100644
index 0000000000..ba5bd62d61
--- /dev/null
+++ b/hw/hyperv/trace-events
@@ -0,0 +1,18 @@
+# vmbus
+vmbus_recv_message(uint32_t type, uint32_t size) "type %d size %d"
+vmbus_signal_event(void) ""
+vmbus_channel_notify_guest(uint32_t chan_id) "channel #%d"
+vmbus_post_msg(uint32_t type, uint32_t size) "type %d size %d"
+vmbus_msg_cb(int status) "message status %d"
+vmbus_process_incoming_message(uint32_t message_type) "type %d"
+vmbus_initiate_contact(uint16_t major, uint16_t minor, uint32_t vcpu, uint64_t monitor_page1, uint64_t monitor_page2, uint64_t interrupt_page) "version %d.%d target vp %d mon pages 0x%"PRIx64",0x%"PRIx64" int page 0x%"PRIx64
+vmbus_send_offer(uint32_t chan_id, void *dev) "channel #%d dev %p"
+vmbus_terminate_offers(void) ""
+vmbus_gpadl_header(uint32_t gpadl_id, uint16_t num_gfns) "gpadl #%d gfns %d"
+vmbus_gpadl_body(uint32_t gpadl_id) "gpadl #%d"
+vmbus_gpadl_created(uint32_t gpadl_id) "gpadl #%d"
+vmbus_gpadl_teardown(uint32_t gpadl_id) "gpadl #%d"
+vmbus_gpadl_torndown(uint32_t gpadl_id) "gpadl #%d"
+vmbus_open_channel(uint32_t chan_id, uint32_t gpadl_id, uint32_t target_vp) "channel #%d gpadl #%d target vp %d"
+vmbus_channel_open(uint32_t chan_id, uint32_t status) "channel #%d status %d"
+vmbus_close_channel(uint32_t chan_id) "channel #%d"
diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c
new file mode 100644
index 0000000000..f371240176
--- /dev/null
+++ b/hw/hyperv/vmbus.c
@@ -0,0 +1,2778 @@
+/*
+ * QEMU Hyper-V VMBus
+ *
+ * Copyright (c) 2017-2018 Virtuozzo International GmbH.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qapi/error.h"
+#include "migration/vmstate.h"
+#include "hw/qdev-properties.h"
+#include "hw/hyperv/hyperv.h"
+#include "hw/hyperv/vmbus.h"
+#include "hw/hyperv/vmbus-bridge.h"
+#include "hw/sysbus.h"
+#include "cpu.h"
+#include "trace.h"
+
+#define TYPE_VMBUS "vmbus"
+#define VMBUS(obj) OBJECT_CHECK(VMBus, (obj), TYPE_VMBUS)
+
+enum {
+ VMGPADL_INIT,
+ VMGPADL_ALIVE,
+ VMGPADL_TEARINGDOWN,
+ VMGPADL_TORNDOWN,
+};
+
+struct VMBusGpadl {
+ /* GPADL id */
+ uint32_t id;
+ /* associated channel id (rudimentary?) */
+ uint32_t child_relid;
+
+ /* number of pages in the GPADL as declared in GPADL_HEADER message */
+ uint32_t num_gfns;
+ /*
+ * Due to limited message size, GPADL may not fit fully in a single
+ * GPADL_HEADER message, and is further populated using GPADL_BODY
+ * messages. @seen_gfns is the number of pages seen so far; once it
+ * reaches @num_gfns, the GPADL is ready to use.
+ */
+ uint32_t seen_gfns;
+ /* array of GFNs (of size @num_gfns once allocated) */
+ uint64_t *gfns;
+
+ uint8_t state;
+
+ QTAILQ_ENTRY(VMBusGpadl) link;
+ VMBus *vmbus;
+ unsigned refcount;
+};
+
+/*
+ * Wrap sequential read from / write to GPADL.
+ */
+typedef struct GpadlIter {
+ VMBusGpadl *gpadl;
+ AddressSpace *as;
+ DMADirection dir;
+ /* offset into GPADL where the next i/o will be performed */
+ uint32_t off;
+ /*
+ * Cached mapping of the currently accessed page, up to page boundary.
+ * Updated lazily on i/o.
+ * Note: MemoryRegionCache can not be used here because pages in the GPADL
+ * are non-contiguous and may belong to different memory regions.
+ */
+ void *map;
+ /* offset after last i/o (i.e. not affected by seek) */
+ uint32_t last_off;
+ /*
+ * Indicator that the iterator is active and may have a cached mapping.
+ * Allows enforcing bracketing of all i/o (which may create cached
+ * mappings) and thus excluding mapping leaks.
+ */
+ bool active;
+} GpadlIter;
+
+/*
+ * Ring buffer. There are two of them, sitting in the same GPADL, for each
+ * channel.
+ * Each ring buffer consists of a set of pages, with the first page containing
+ * the ring buffer header, and the remaining pages being for data packets.
+ */
+typedef struct VMBusRingBufCommon {
+ AddressSpace *as;
+ /* GPA of the ring buffer header */
+ dma_addr_t rb_addr;
+ /* start and length of the ring buffer data area within GPADL */
+ uint32_t base;
+ uint32_t len;
+
+ GpadlIter iter;
+} VMBusRingBufCommon;
+
+typedef struct VMBusSendRingBuf {
+ VMBusRingBufCommon common;
+ /* current write index, to be committed at the end of send */
+ uint32_t wr_idx;
+ /* write index at the start of send */
+ uint32_t last_wr_idx;
+ /* space to be requested from the guest */
+ uint32_t wanted;
+ /* space reserved for planned sends */
+ uint32_t reserved;
+ /* last seen read index */
+ uint32_t last_seen_rd_idx;
+} VMBusSendRingBuf;
+
+typedef struct VMBusRecvRingBuf {
+ VMBusRingBufCommon common;
+ /* current read index, to be committed at the end of receive */
+ uint32_t rd_idx;
+ /* read index at the start of receive */
+ uint32_t last_rd_idx;
+ /* last seen write index */
+ uint32_t last_seen_wr_idx;
+} VMBusRecvRingBuf;
+
+
+enum {
+ VMOFFER_INIT,
+ VMOFFER_SENDING,
+ VMOFFER_SENT,
+};
+
+enum {
+ VMCHAN_INIT,
+ VMCHAN_OPENING,
+ VMCHAN_OPEN,
+};
+
+struct VMBusChannel {
+ VMBusDevice *dev;
+
+ /* channel id */
+ uint32_t id;
+ /*
+ * subchannel index within the device; subchannel #0 is "primary" and
+ * always exists
+ */
+ uint16_t subchan_idx;
+ uint32_t open_id;
+ /* VP_INDEX of the vCPU to notify with (synthetic) interrupts */
+ uint32_t target_vp;
+ /* GPADL id to use for the ring buffers */
+ uint32_t ringbuf_gpadl;
+ /* start (in pages) of the send ring buffer within @ringbuf_gpadl */
+ uint32_t ringbuf_send_offset;
+
+ uint8_t offer_state;
+ uint8_t state;
+ bool is_open;
+
+ /* main device worker; copied from the device class */
+ VMBusChannelNotifyCb notify_cb;
+ /*
+ * guest->host notifications, either sent directly or dispatched via
+ * interrupt page (older VMBus)
+ */
+ EventNotifier notifier;
+
+ VMBus *vmbus;
+ /*
+ * SINT route to signal with host->guest notifications; may be shared with
+ * the main VMBus SINT route
+ */
+ HvSintRoute *notify_route;
+ VMBusGpadl *gpadl;
+
+ VMBusSendRingBuf send_ringbuf;
+ VMBusRecvRingBuf recv_ringbuf;
+
+ QTAILQ_ENTRY(VMBusChannel) link;
+};
+
+/*
+ * Hyper-V spec mandates that every message port has 16 buffers, which means
+ * that the guest can post up to this many messages without blocking.
+ * Therefore a queue for incoming messages has to be provided.
+ * For outgoing (i.e. host->guest) messages there's no queue; the VMBus just
+ * doesn't transition to a new state until the message is known to have been
+ * successfully delivered to the respective SynIC message slot.
+ */
+#define HV_MSG_QUEUE_LEN 16
+
+/* Hyper-V devices never use channel #0. Must be something special. */
+#define VMBUS_FIRST_CHANID 1
+/* Each channel occupies one bit within a single event page sint slot. */
+#define VMBUS_CHANID_COUNT (HV_EVENT_FLAGS_COUNT - VMBUS_FIRST_CHANID)
+/* Leave a few connection numbers for other purposes. */
+#define VMBUS_CHAN_CONNECTION_OFFSET 16
+
+/*
+ * Since the success or failure of sending a message is reported
+ * asynchronously, the VMBus state machine has effectively two entry points:
+ * vmbus_run and vmbus_msg_cb (the latter is called when the host->guest
+ * message delivery status becomes known). Both are run as oneshot BHs on the
+ * main aio context, ensuring serialization.
+ */
+enum {
+ VMBUS_LISTEN,
+ VMBUS_HANDSHAKE,
+ VMBUS_OFFER,
+ VMBUS_CREATE_GPADL,
+ VMBUS_TEARDOWN_GPADL,
+ VMBUS_OPEN_CHANNEL,
+ VMBUS_UNLOAD,
+ VMBUS_STATE_MAX
+};
+
+struct VMBus {
+ BusState parent;
+
+ uint8_t state;
+ /* protection against recursive aio_poll (see vmbus_run) */
+ bool in_progress;
+ /* whether there's a message being delivered to the guest */
+ bool msg_in_progress;
+ uint32_t version;
+ /* VP_INDEX of the vCPU to send messages and interrupts to */
+ uint32_t target_vp;
+ HvSintRoute *sint_route;
+ /*
+ * interrupt page for older protocol versions; newer ones use SynIC event
+ * flags directly
+ */
+ hwaddr int_page_gpa;
+
+ DECLARE_BITMAP(chanid_bitmap, VMBUS_CHANID_COUNT);
+
+ /* incoming message queue */
+ struct hyperv_post_message_input rx_queue[HV_MSG_QUEUE_LEN];
+ uint8_t rx_queue_head;
+ uint8_t rx_queue_size;
+ QemuMutex rx_queue_lock;
+
+ QTAILQ_HEAD(, VMBusGpadl) gpadl_list;
+ QTAILQ_HEAD(, VMBusChannel) channel_list;
+
+ /*
+ * guest->host notifications for older VMBus, to be dispatched via
+ * interrupt page
+ */
+ EventNotifier notifier;
+};
+
+static bool gpadl_full(VMBusGpadl *gpadl)
+{
+ return gpadl->seen_gfns == gpadl->num_gfns;
+}
+
+static VMBusGpadl *create_gpadl(VMBus *vmbus, uint32_t id,
+ uint32_t child_relid, uint32_t num_gfns)
+{
+ VMBusGpadl *gpadl = g_new0(VMBusGpadl, 1);
+
+ gpadl->id = id;
+ gpadl->child_relid = child_relid;
+ gpadl->num_gfns = num_gfns;
+ gpadl->gfns = g_new(uint64_t, num_gfns);
+ QTAILQ_INSERT_HEAD(&vmbus->gpadl_list, gpadl, link);
+ gpadl->vmbus = vmbus;
+ gpadl->refcount = 1;
+ return gpadl;
+}
+
+static void free_gpadl(VMBusGpadl *gpadl)
+{
+ QTAILQ_REMOVE(&gpadl->vmbus->gpadl_list, gpadl, link);
+ g_free(gpadl->gfns);
+ g_free(gpadl);
+}
+
+static VMBusGpadl *find_gpadl(VMBus *vmbus, uint32_t gpadl_id)
+{
+ VMBusGpadl *gpadl;
+ QTAILQ_FOREACH(gpadl, &vmbus->gpadl_list, link) {
+ if (gpadl->id == gpadl_id) {
+ return gpadl;
+ }
+ }
+ return NULL;
+}
+
+VMBusGpadl *vmbus_get_gpadl(VMBusChannel *chan, uint32_t gpadl_id)
+{
+ VMBusGpadl *gpadl = find_gpadl(chan->vmbus, gpadl_id);
+ if (!gpadl || !gpadl_full(gpadl)) {
+ return NULL;
+ }
+ gpadl->refcount++;
+ return gpadl;
+}
+
+void vmbus_put_gpadl(VMBusGpadl *gpadl)
+{
+ if (!gpadl) {
+ return;
+ }
+ if (--gpadl->refcount) {
+ return;
+ }
+ free_gpadl(gpadl);
+}
+
+uint32_t vmbus_gpadl_len(VMBusGpadl *gpadl)
+{
+ return gpadl->num_gfns * TARGET_PAGE_SIZE;
+}
+
+static void gpadl_iter_init(GpadlIter *iter, VMBusGpadl *gpadl,
+ AddressSpace *as, DMADirection dir)
+{
+ iter->gpadl = gpadl;
+ iter->as = as;
+ iter->dir = dir;
+ iter->active = false;
+}
+
+static inline void gpadl_iter_cache_unmap(GpadlIter *iter)
+{
+ uint32_t map_start_in_page = (uintptr_t)iter->map & ~TARGET_PAGE_MASK;
+ uint32_t io_end_in_page = ((iter->last_off - 1) & ~TARGET_PAGE_MASK) + 1;
+
+ /* mapping is only done to do non-zero amount of i/o */
+ assert(iter->last_off > 0);
+ assert(map_start_in_page < io_end_in_page);
+
+ dma_memory_unmap(iter->as, iter->map, TARGET_PAGE_SIZE - map_start_in_page,
+ iter->dir, io_end_in_page - map_start_in_page);
+}
+
+/*
+ * Copy exactly @len bytes between the GPADL pointed to by @iter and @buf.
+ * The direction of the copy is determined by @iter->dir.
+ * The caller must ensure the operation overflows neither @buf nor the GPADL
+ * (there's an assert for the latter).
+ * Reuse the currently mapped page in the GPADL if possible.
+ */
+static ssize_t gpadl_iter_io(GpadlIter *iter, void *buf, uint32_t len)
+{
+ ssize_t ret = len;
+
+ assert(iter->active);
+
+ while (len) {
+ uint32_t off_in_page = iter->off & ~TARGET_PAGE_MASK;
+ uint32_t pgleft = TARGET_PAGE_SIZE - off_in_page;
+ uint32_t cplen = MIN(pgleft, len);
+ void *p;
+
+ /* try to reuse the cached mapping */
+ if (iter->map) {
+ uint32_t map_start_in_page =
+ (uintptr_t)iter->map & ~TARGET_PAGE_MASK;
+ uint32_t off_base = iter->off & ~TARGET_PAGE_MASK;
+ uint32_t mapped_base = (iter->last_off - 1) & ~TARGET_PAGE_MASK;
+ if (off_base != mapped_base || off_in_page < map_start_in_page) {
+ gpadl_iter_cache_unmap(iter);
+ iter->map = NULL;
+ }
+ }
+
+ if (!iter->map) {
+ dma_addr_t maddr;
+ dma_addr_t mlen = pgleft;
+ uint32_t idx = iter->off >> TARGET_PAGE_BITS;
+ assert(idx < iter->gpadl->num_gfns);
+
+ maddr = (iter->gpadl->gfns[idx] << TARGET_PAGE_BITS) | off_in_page;
+
+ iter->map = dma_memory_map(iter->as, maddr, &mlen, iter->dir);
+ if (mlen != pgleft) {
+ dma_memory_unmap(iter->as, iter->map, mlen, iter->dir, 0);
+ iter->map = NULL;
+ return -EFAULT;
+ }
+ }
+
+ p = (void *)(((uintptr_t)iter->map & TARGET_PAGE_MASK) | off_in_page);
+ if (iter->dir == DMA_DIRECTION_FROM_DEVICE) {
+ memcpy(p, buf, cplen);
+ } else {
+ memcpy(buf, p, cplen);
+ }
+
+ buf += cplen;
+ len -= cplen;
+ iter->off += cplen;
+ iter->last_off = iter->off;
+ }
+
+ return ret;
+}
+
+/*
+ * Position the iterator @iter at new offset @new_off.
+ * If this results in the cached mapping being unusable with the new offset,
+ * unmap it.
+ */
+static inline void gpadl_iter_seek(GpadlIter *iter, uint32_t new_off)
+{
+ assert(iter->active);
+ iter->off = new_off;
+}
+
+/*
+ * Start a series of i/o on the GPADL.
+ * After this i/o and seek operations on @iter become legal.
+ */
+static inline void gpadl_iter_start_io(GpadlIter *iter)
+{
+ assert(!iter->active);
+ /* mapping is cached lazily on i/o */
+ iter->map = NULL;
+ iter->active = true;
+}
+
+/*
+ * End the series of i/o started earlier on the GPADL and release the cached
+ * mapping if any.
+ */
+static inline void gpadl_iter_end_io(GpadlIter *iter)
+{
+ assert(iter->active);
+
+ if (iter->map) {
+ gpadl_iter_cache_unmap(iter);
+ }
+
+ iter->active = false;
+}
+
+static void vmbus_resched(VMBus *vmbus);
+static void vmbus_msg_cb(void *data, int status);
+
+ssize_t vmbus_iov_to_gpadl(VMBusChannel *chan, VMBusGpadl *gpadl, uint32_t off,
+ const struct iovec *iov, size_t iov_cnt)
+{
+ GpadlIter iter;
+ size_t i;
+ ssize_t ret = 0;
+
+ gpadl_iter_init(&iter, gpadl, chan->dev->dma_as,
+ DMA_DIRECTION_FROM_DEVICE);
+ gpadl_iter_start_io(&iter);
+ gpadl_iter_seek(&iter, off);
+ for (i = 0; i < iov_cnt; i++) {
+ ret = gpadl_iter_io(&iter, iov[i].iov_base, iov[i].iov_len);
+ if (ret < 0) {
+ goto out;
+ }
+ }
+out:
+ gpadl_iter_end_io(&iter);
+ return ret;
+}
+
+int vmbus_map_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov,
+ unsigned iov_cnt, size_t len, size_t off)
+{
+ int ret_cnt = 0, ret;
+ unsigned i;
+ QEMUSGList *sgl = &req->sgl;
+ ScatterGatherEntry *sg = sgl->sg;
+
+ for (i = 0; i < sgl->nsg; i++) {
+ if (sg[i].len > off) {
+ break;
+ }
+ off -= sg[i].len;
+ }
+ for (; len && i < sgl->nsg; i++) {
+ dma_addr_t mlen = MIN(sg[i].len - off, len);
+ dma_addr_t addr = sg[i].base + off;
+ len -= mlen;
+ off = 0;
+
+ for (; mlen; ret_cnt++) {
+ dma_addr_t l = mlen;
+ dma_addr_t a = addr;
+
+ if (ret_cnt == iov_cnt) {
+ ret = -ENOBUFS;
+ goto err;
+ }
+
+ iov[ret_cnt].iov_base = dma_memory_map(sgl->as, a, &l, dir);
+ if (!l) {
+ ret = -EFAULT;
+ goto err;
+ }
+ iov[ret_cnt].iov_len = l;
+ addr += l;
+ mlen -= l;
+ }
+ }
+
+ return ret_cnt;
+err:
+ vmbus_unmap_sgl(req, dir, iov, ret_cnt, 0);
+ return ret;
+}
+
+void vmbus_unmap_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov,
+ unsigned iov_cnt, size_t accessed)
+{
+ QEMUSGList *sgl = &req->sgl;
+ unsigned i;
+
+ for (i = 0; i < iov_cnt; i++) {
+ size_t acsd = MIN(accessed, iov[i].iov_len);
+ dma_memory_unmap(sgl->as, iov[i].iov_base, iov[i].iov_len, dir, acsd);
+ accessed -= acsd;
+ }
+}
+
+static const VMStateDescription vmstate_gpadl = {
+ .name = "vmbus/gpadl",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(id, VMBusGpadl),
+ VMSTATE_UINT32(child_relid, VMBusGpadl),
+ VMSTATE_UINT32(num_gfns, VMBusGpadl),
+ VMSTATE_UINT32(seen_gfns, VMBusGpadl),
+ VMSTATE_VARRAY_UINT32_ALLOC(gfns, VMBusGpadl, num_gfns, 0,
+ vmstate_info_uint64, uint64_t),
+ VMSTATE_UINT8(state, VMBusGpadl),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/*
+ * Wrap the index into a ring buffer of @len bytes.
+ * @idx is assumed not to exceed twice the size of the ringbuffer, so only
+ * single wraparound is considered.
+ */
+static inline uint32_t rb_idx_wrap(uint32_t idx, uint32_t len)
+{
+ if (idx >= len) {
+ idx -= len;
+ }
+ return idx;
+}
+
+/*
+ * Circular difference between two indices into a ring buffer of @len bytes.
+ * @allow_catchup - whether @idx1 may catch up @idx2; e.g. read index may catch
+ * up write index but not vice versa.
+ */
+static inline uint32_t rb_idx_delta(uint32_t idx1, uint32_t idx2, uint32_t len,
+ bool allow_catchup)
+{
+ return rb_idx_wrap(idx2 + len - idx1 - !allow_catchup, len);
+}
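Editorial aside (not part of the patch): a worked instance of the circular arithmetic above. With len = 0x1000, a read index of 0xf00 and a write index of 0x100, the consumer may read 0x200 bytes (0x100 up to the end of the buffer plus 0x100 after the wraparound), while the producer may write only 0xdff bytes, one short of the 0xe00 gap, so the write index can never catch up with the read index:

static void example_rb_idx_delta(void)
{
    uint32_t len = 0x1000, rd = 0xf00, wr = 0x100;

    assert(rb_idx_delta(rd, wr, len, true) == 0x200);   /* readable data */
    assert(rb_idx_delta(wr, rd, len, false) == 0xdff);  /* writable space */
}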
+
+static vmbus_ring_buffer *ringbuf_map_hdr(VMBusRingBufCommon *ringbuf)
+{
+ vmbus_ring_buffer *rb;
+ dma_addr_t mlen = sizeof(*rb);
+
+ rb = dma_memory_map(ringbuf->as, ringbuf->rb_addr, &mlen,
+ DMA_DIRECTION_FROM_DEVICE);
+ if (mlen != sizeof(*rb)) {
+ dma_memory_unmap(ringbuf->as, rb, mlen,
+ DMA_DIRECTION_FROM_DEVICE, 0);
+ return NULL;
+ }
+ return rb;
+}
+
+static void ringbuf_unmap_hdr(VMBusRingBufCommon *ringbuf,
+ vmbus_ring_buffer *rb, bool dirty)
+{
+ assert(rb);
+
+ dma_memory_unmap(ringbuf->as, rb, sizeof(*rb), DMA_DIRECTION_FROM_DEVICE,
+ dirty ? sizeof(*rb) : 0);
+}
+
+static void ringbuf_init_common(VMBusRingBufCommon *ringbuf, VMBusGpadl *gpadl,
+ AddressSpace *as, DMADirection dir,
+ uint32_t begin, uint32_t end)
+{
+ ringbuf->as = as;
+ ringbuf->rb_addr = gpadl->gfns[begin] << TARGET_PAGE_BITS;
+ ringbuf->base = (begin + 1) << TARGET_PAGE_BITS;
+ ringbuf->len = (end - begin - 1) << TARGET_PAGE_BITS;
+ gpadl_iter_init(&ringbuf->iter, gpadl, as, dir);
+}
+
+static int ringbufs_init(VMBusChannel *chan)
+{
+ vmbus_ring_buffer *rb;
+ VMBusSendRingBuf *send_ringbuf = &chan->send_ringbuf;
+ VMBusRecvRingBuf *recv_ringbuf = &chan->recv_ringbuf;
+
+ if (chan->ringbuf_send_offset <= 1 ||
+ chan->gpadl->num_gfns <= chan->ringbuf_send_offset + 1) {
+ return -EINVAL;
+ }
+
+ ringbuf_init_common(&recv_ringbuf->common, chan->gpadl, chan->dev->dma_as,
+ DMA_DIRECTION_TO_DEVICE, 0, chan->ringbuf_send_offset);
+ ringbuf_init_common(&send_ringbuf->common, chan->gpadl, chan->dev->dma_as,
+ DMA_DIRECTION_FROM_DEVICE, chan->ringbuf_send_offset,
+ chan->gpadl->num_gfns);
+ send_ringbuf->wanted = 0;
+ send_ringbuf->reserved = 0;
+
+ rb = ringbuf_map_hdr(&recv_ringbuf->common);
+ if (!rb) {
+ return -EFAULT;
+ }
+ recv_ringbuf->rd_idx = recv_ringbuf->last_rd_idx = rb->read_index;
+ ringbuf_unmap_hdr(&recv_ringbuf->common, rb, false);
+
+ rb = ringbuf_map_hdr(&send_ringbuf->common);
+ if (!rb) {
+ return -EFAULT;
+ }
+ send_ringbuf->wr_idx = send_ringbuf->last_wr_idx = rb->write_index;
+ send_ringbuf->last_seen_rd_idx = rb->read_index;
+ rb->feature_bits |= VMBUS_RING_BUFFER_FEAT_PENDING_SZ;
+ ringbuf_unmap_hdr(&send_ringbuf->common, rb, true);
+
+ if (recv_ringbuf->rd_idx >= recv_ringbuf->common.len ||
+ send_ringbuf->wr_idx >= send_ringbuf->common.len) {
+ return -EOVERFLOW;
+ }
+
+ return 0;
+}
+
+/*
+ * Perform io between the GPADL-backed ringbuffer @ringbuf and @buf, wrapping
+ * around if needed.
+ * @len is assumed not to exceed the size of the ringbuffer, so only single
+ * wraparound is considered.
+ */
+static ssize_t ringbuf_io(VMBusRingBufCommon *ringbuf, void *buf, uint32_t len)
+{
+ ssize_t ret1 = 0, ret2 = 0;
+ uint32_t remain = ringbuf->len + ringbuf->base - ringbuf->iter.off;
+
+ if (len >= remain) {
+ ret1 = gpadl_iter_io(&ringbuf->iter, buf, remain);
+ if (ret1 < 0) {
+ return ret1;
+ }
+ gpadl_iter_seek(&ringbuf->iter, ringbuf->base);
+ buf += remain;
+ len -= remain;
+ }
+ ret2 = gpadl_iter_io(&ringbuf->iter, buf, len);
+ if (ret2 < 0) {
+ return ret2;
+ }
+ return ret1 + ret2;
+}
+
+/*
+ * Position the circular iterator within @ringbuf to offset @new_off, wrapping
+ * around if needed.
+ * @new_off is assumed not to exceed twice the size of the ringbuffer, so only
+ * single wraparound is considered.
+ */
+static inline void ringbuf_seek(VMBusRingBufCommon *ringbuf, uint32_t new_off)
+{
+ gpadl_iter_seek(&ringbuf->iter,
+ ringbuf->base + rb_idx_wrap(new_off, ringbuf->len));
+}
+
+static inline uint32_t ringbuf_tell(VMBusRingBufCommon *ringbuf)
+{
+ return ringbuf->iter.off - ringbuf->base;
+}
+
+static inline void ringbuf_start_io(VMBusRingBufCommon *ringbuf)
+{
+ gpadl_iter_start_io(&ringbuf->iter);
+}
+
+static inline void ringbuf_end_io(VMBusRingBufCommon *ringbuf)
+{
+ gpadl_iter_end_io(&ringbuf->iter);
+}
+
+VMBusDevice *vmbus_channel_device(VMBusChannel *chan)
+{
+ return chan->dev;
+}
+
+VMBusChannel *vmbus_device_channel(VMBusDevice *dev, uint32_t chan_idx)
+{
+ if (chan_idx >= dev->num_channels) {
+ return NULL;
+ }
+ return &dev->channels[chan_idx];
+}
+
+uint32_t vmbus_channel_idx(VMBusChannel *chan)
+{
+ return chan - chan->dev->channels;
+}
+
+void vmbus_channel_notify_host(VMBusChannel *chan)
+{
+ event_notifier_set(&chan->notifier);
+}
+
+bool vmbus_channel_is_open(VMBusChannel *chan)
+{
+ return chan->is_open;
+}
+
+/*
+ * Notify the guest side about the data to work on in the channel ring buffer.
+ * The notification is done by signaling a dedicated per-channel SynIC event
+ * flag (more recent guests) or setting a bit in the interrupt page and firing
+ * the VMBus SINT (older guests).
+ */
+static int vmbus_channel_notify_guest(VMBusChannel *chan)
+{
+ int res = 0;
+ unsigned long *int_map, mask;
+ unsigned idx;
+ hwaddr addr = chan->vmbus->int_page_gpa;
+ hwaddr len = TARGET_PAGE_SIZE / 2, dirty = 0;
+
+ trace_vmbus_channel_notify_guest(chan->id);
+
+ if (!addr) {
+ return hyperv_set_event_flag(chan->notify_route, chan->id);
+ }
+
+ int_map = cpu_physical_memory_map(addr, &len, 1);
+ if (len != TARGET_PAGE_SIZE / 2) {
+ res = -ENXIO;
+ goto unmap;
+ }
+
+ idx = BIT_WORD(chan->id);
+ mask = BIT_MASK(chan->id);
+ if ((atomic_fetch_or(&int_map[idx], mask) & mask) != mask) {
+ res = hyperv_sint_route_set_sint(chan->notify_route);
+ dirty = len;
+ }
+
+unmap:
+ cpu_physical_memory_unmap(int_map, len, 1, dirty);
+ return res;
+}
+
+#define VMBUS_PKT_TRAILER sizeof(uint64_t)
+
+static uint32_t vmbus_pkt_hdr_set_offsets(vmbus_packet_hdr *hdr,
+ uint32_t desclen, uint32_t msglen)
+{
+ hdr->offset_qwords = sizeof(*hdr) / sizeof(uint64_t) +
+ DIV_ROUND_UP(desclen, sizeof(uint64_t));
+ hdr->len_qwords = hdr->offset_qwords +
+ DIV_ROUND_UP(msglen, sizeof(uint64_t));
+ return hdr->len_qwords * sizeof(uint64_t) + VMBUS_PKT_TRAILER;
+}
+
+/*
+ * Simplified ring buffer operation with paired barriers annotations in the
+ * producer and consumer loops:
+ *
+ * producer * consumer
+ * ~~~~~~~~ * ~~~~~~~~
+ * write pending_send_sz * read write_index
+ * smp_mb [A] * smp_mb [C]
+ * read read_index * read packet
+ * smp_mb [B] * read/write out-of-band data
+ * read/write out-of-band data * smp_mb [B]
+ * write packet * write read_index
+ * smp_mb [C] * smp_mb [A]
+ * write write_index * read pending_send_sz
+ * smp_wmb [D] * smp_rmb [D]
+ * write pending_send_sz * read write_index
+ * ... * ...
+ */
+
+static inline uint32_t ringbuf_send_avail(VMBusSendRingBuf *ringbuf)
+{
+ /* don't trust guest data */
+ if (ringbuf->last_seen_rd_idx >= ringbuf->common.len) {
+ return 0;
+ }
+ return rb_idx_delta(ringbuf->wr_idx, ringbuf->last_seen_rd_idx,
+ ringbuf->common.len, false);
+}
+
+static ssize_t ringbuf_send_update_idx(VMBusChannel *chan)
+{
+ VMBusSendRingBuf *ringbuf = &chan->send_ringbuf;
+ vmbus_ring_buffer *rb;
+ uint32_t written;
+
+ written = rb_idx_delta(ringbuf->last_wr_idx, ringbuf->wr_idx,
+ ringbuf->common.len, true);
+ if (!written) {
+ return 0;
+ }
+
+ rb = ringbuf_map_hdr(&ringbuf->common);
+ if (!rb) {
+ return -EFAULT;
+ }
+
+ ringbuf->reserved -= written;
+
+ /* prevent reorder with the data operation and packet write */
+ smp_mb(); /* barrier pair [C] */
+ rb->write_index = ringbuf->wr_idx;
+
+ /*
+ * If the producer earlier indicated that it wants to be notified when the
+ * consumer frees certain amount of space in the ring buffer, that amount
+ * is reduced by the size of the completed write.
+ */
+ if (ringbuf->wanted) {
+ /* otherwise reservation would fail */
+ assert(ringbuf->wanted < written);
+ ringbuf->wanted -= written;
+ /* prevent reorder with write_index write */
+ smp_wmb(); /* barrier pair [D] */
+ rb->pending_send_sz = ringbuf->wanted;
+ }
+
+ /* prevent reorder with write_index or pending_send_sz write */
+ smp_mb(); /* barrier pair [A] */
+ ringbuf->last_seen_rd_idx = rb->read_index;
+
+ /*
+ * The consumer may have missed the reduction of pending_send_sz and
+ * skipped the notification, so re-check the blocking condition and, if
+ * it's no longer true, ensure another iteration is processed by
+ * simulating the consumer's notification.
+ */
+ if (ringbuf_send_avail(ringbuf) >= ringbuf->wanted) {
+ vmbus_channel_notify_host(chan);
+ }
+
+ /* skip notification by consumer's request */
+ if (rb->interrupt_mask) {
+ goto out;
+ }
+
+ /*
+ * The consumer hasn't caught up with the producer's previous state so it's
+ * not blocked.
+ * (last_seen_rd_idx comes from the guest but it's safe to use w/o
+ * validation here as it only affects notification.)
+ */
+ if (rb_idx_delta(ringbuf->last_seen_rd_idx, ringbuf->wr_idx,
+ ringbuf->common.len, true) > written) {
+ goto out;
+ }
+
+ vmbus_channel_notify_guest(chan);
+out:
+ ringbuf_unmap_hdr(&ringbuf->common, rb, true);
+ ringbuf->last_wr_idx = ringbuf->wr_idx;
+ return written;
+}
+
+int vmbus_channel_reserve(VMBusChannel *chan,
+ uint32_t desclen, uint32_t msglen)
+{
+ VMBusSendRingBuf *ringbuf = &chan->send_ringbuf;
+ vmbus_ring_buffer *rb = NULL;
+ vmbus_packet_hdr hdr;
+ uint32_t needed = ringbuf->reserved +
+ vmbus_pkt_hdr_set_offsets(&hdr, desclen, msglen);
+
+ /* avoid touching the guest memory if possible */
+ if (likely(needed <= ringbuf_send_avail(ringbuf))) {
+ goto success;
+ }
+
+ rb = ringbuf_map_hdr(&ringbuf->common);
+ if (!rb) {
+ return -EFAULT;
+ }
+
+ /* fetch read index from guest memory and try again */
+ ringbuf->last_seen_rd_idx = rb->read_index;
+
+ if (likely(needed <= ringbuf_send_avail(ringbuf))) {
+ goto success;
+ }
+
+ rb->pending_send_sz = needed;
+
+ /*
+ * The consumer may have made progress and freed up some space before
+ * seeing updated pending_send_sz, so re-read read_index (preventing
+ * reorder with the pending_send_sz write) and try again.
+ */
+ smp_mb(); /* barrier pair [A] */
+ ringbuf->last_seen_rd_idx = rb->read_index;
+
+ if (needed > ringbuf_send_avail(ringbuf)) {
+ goto out;
+ }
+
+success:
+ ringbuf->reserved = needed;
+ needed = 0;
+
+ /* clear pending_send_sz if it was set */
+ if (ringbuf->wanted) {
+ if (!rb) {
+ rb = ringbuf_map_hdr(&ringbuf->common);
+ if (!rb) {
+ /* failure to clear pending_send_sz is non-fatal */
+ goto out;
+ }
+ }
+
+ rb->pending_send_sz = 0;
+ }
+
+ /* prevent reorder of the following data operation with read_index read */
+ smp_mb(); /* barrier pair [B] */
+
+out:
+ if (rb) {
+ ringbuf_unmap_hdr(&ringbuf->common, rb, ringbuf->wanted == needed);
+ }
+ ringbuf->wanted = needed;
+ return needed ? -ENOSPC : 0;
+}
+
+ssize_t vmbus_channel_send(VMBusChannel *chan, uint16_t pkt_type,
+ void *desc, uint32_t desclen,
+ void *msg, uint32_t msglen,
+ bool need_comp, uint64_t transaction_id)
+{
+ ssize_t ret = 0;
+ vmbus_packet_hdr hdr;
+ uint32_t totlen;
+ VMBusSendRingBuf *ringbuf = &chan->send_ringbuf;
+
+ if (!vmbus_channel_is_open(chan)) {
+ return -EINVAL;
+ }
+
+ totlen = vmbus_pkt_hdr_set_offsets(&hdr, desclen, msglen);
+ hdr.type = pkt_type;
+ hdr.flags = need_comp ? VMBUS_PACKET_FLAG_REQUEST_COMPLETION : 0;
+ hdr.transaction_id = transaction_id;
+
+ assert(totlen <= ringbuf->reserved);
+
+ ringbuf_start_io(&ringbuf->common);
+ ringbuf_seek(&ringbuf->common, ringbuf->wr_idx);
+ ret = ringbuf_io(&ringbuf->common, &hdr, sizeof(hdr));
+ if (ret < 0) {
+ goto out;
+ }
+ if (desclen) {
+ assert(desc);
+ ret = ringbuf_io(&ringbuf->common, desc, desclen);
+ if (ret < 0) {
+ goto out;
+ }
+ ringbuf_seek(&ringbuf->common,
+ ringbuf->wr_idx + hdr.offset_qwords * sizeof(uint64_t));
+ }
+ ret = ringbuf_io(&ringbuf->common, msg, msglen);
+ if (ret < 0) {
+ goto out;
+ }
+ ringbuf_seek(&ringbuf->common, ringbuf->wr_idx + totlen);
+ ringbuf->wr_idx = ringbuf_tell(&ringbuf->common);
+ ret = 0;
+out:
+ ringbuf_end_io(&ringbuf->common);
+ if (ret) {
+ return ret;
+ }
+ return ringbuf_send_update_idx(chan);
+}
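Editorial sketch (not from this series) of how a channel user is expected to drive the two calls above: reserve ring space first, which may fail with -ENOSPC and arm pending_send_sz, then send. The payload struct is made up for illustration:

static void example_send_inband(VMBusChannel *chan)
{
    struct { uint32_t op; } msg = { .op = 1 };      /* hypothetical payload */

    if (vmbus_channel_reserve(chan, 0, sizeof(msg)) < 0) {
        return;     /* no space yet; retry when the guest frees some up */
    }
    vmbus_channel_send(chan, VMBUS_PACKET_DATA_INBAND, NULL, 0,
                       &msg, sizeof(msg), false, 0);
}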
+
+ssize_t vmbus_channel_send_completion(VMBusChanReq *req,
+ void *msg, uint32_t msglen)
+{
+ assert(req->need_comp);
+ return vmbus_channel_send(req->chan, VMBUS_PACKET_COMP, NULL, 0,
+ msg, msglen, false, req->transaction_id);
+}
+
+static int sgl_from_gpa_ranges(QEMUSGList *sgl, VMBusDevice *dev,
+ VMBusRingBufCommon *ringbuf, uint32_t len)
+{
+ int ret;
+ vmbus_pkt_gpa_direct hdr;
+ hwaddr curaddr = 0;
+ hwaddr curlen = 0;
+ int num;
+
+ if (len < sizeof(hdr)) {
+ return -EIO;
+ }
+ ret = ringbuf_io(ringbuf, &hdr, sizeof(hdr));
+ if (ret < 0) {
+ return ret;
+ }
+ len -= sizeof(hdr);
+
+ num = (len - hdr.rangecount * sizeof(vmbus_gpa_range)) / sizeof(uint64_t);
+ if (num < 0) {
+ return -EIO;
+ }
+ qemu_sglist_init(sgl, DEVICE(dev), num, ringbuf->as);
+
+ for (; hdr.rangecount; hdr.rangecount--) {
+ vmbus_gpa_range range;
+
+ if (len < sizeof(range)) {
+ goto eio;
+ }
+ ret = ringbuf_io(ringbuf, &range, sizeof(range));
+ if (ret < 0) {
+ goto err;
+ }
+ len -= sizeof(range);
+
+ if (range.byte_offset & TARGET_PAGE_MASK) {
+ goto eio;
+ }
+
+ for (; range.byte_count; range.byte_offset = 0) {
+ uint64_t paddr;
+ uint32_t plen = MIN(range.byte_count,
+ TARGET_PAGE_SIZE - range.byte_offset);
+
+ if (len < sizeof(uint64_t)) {
+ goto eio;
+ }
+ ret = ringbuf_io(ringbuf, &paddr, sizeof(paddr));
+ if (ret < 0) {
+ goto err;
+ }
+ len -= sizeof(uint64_t);
+ paddr <<= TARGET_PAGE_BITS;
+ paddr |= range.byte_offset;
+ range.byte_count -= plen;
+
+ if (curaddr + curlen == paddr) {
+ /* consecutive fragments - join */
+ curlen += plen;
+ } else {
+ if (curlen) {
+ qemu_sglist_add(sgl, curaddr, curlen);
+ }
+
+ curaddr = paddr;
+ curlen = plen;
+ }
+ }
+ }
+
+ if (curlen) {
+ qemu_sglist_add(sgl, curaddr, curlen);
+ }
+
+ return 0;
+eio:
+ ret = -EIO;
+err:
+ qemu_sglist_destroy(sgl);
+ return ret;
+}
+
+static VMBusChanReq *vmbus_alloc_req(VMBusChannel *chan,
+ uint32_t size, uint16_t pkt_type,
+ uint32_t msglen, uint64_t transaction_id,
+ bool need_comp)
+{
+ VMBusChanReq *req;
+ uint32_t msgoff = QEMU_ALIGN_UP(size, __alignof__(*req->msg));
+ uint32_t totlen = msgoff + msglen;
+
+ req = g_malloc0(totlen);
+ req->chan = chan;
+ req->pkt_type = pkt_type;
+ req->msg = (void *)req + msgoff;
+ req->msglen = msglen;
+ req->transaction_id = transaction_id;
+ req->need_comp = need_comp;
+ return req;
+}
+
+int vmbus_channel_recv_start(VMBusChannel *chan)
+{
+ VMBusRecvRingBuf *ringbuf = &chan->recv_ringbuf;
+ vmbus_ring_buffer *rb;
+
+ rb = ringbuf_map_hdr(&ringbuf->common);
+ if (!rb) {
+ return -EFAULT;
+ }
+ ringbuf->last_seen_wr_idx = rb->write_index;
+ ringbuf_unmap_hdr(&ringbuf->common, rb, false);
+
+ if (ringbuf->last_seen_wr_idx >= ringbuf->common.len) {
+ return -EOVERFLOW;
+ }
+
+ /* prevent reorder of the following data operation with write_index read */
+ smp_mb(); /* barrier pair [C] */
+ return 0;
+}
+
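+/*
+ * Parse the packet at the current read position into a newly allocated
+ * request, without consuming it.  The caller commits the consumption with
+ * vmbus_channel_recv_pop(), and vmbus_channel_recv_done() later makes it
+ * visible to the guest.
+ */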
+void *vmbus_channel_recv_peek(VMBusChannel *chan, uint32_t size)
+{
+ VMBusRecvRingBuf *ringbuf = &chan->recv_ringbuf;
+ vmbus_packet_hdr hdr = {};
+ VMBusChanReq *req;
+ uint32_t avail;
+ uint32_t totlen, pktlen, msglen, msgoff, desclen;
+
+ assert(size >= sizeof(*req));
+
+ /* safe as last_seen_wr_idx is validated in vmbus_channel_recv_start */
+ avail = rb_idx_delta(ringbuf->rd_idx, ringbuf->last_seen_wr_idx,
+ ringbuf->common.len, true);
+ if (avail < sizeof(hdr)) {
+ return NULL;
+ }
+
+ ringbuf_seek(&ringbuf->common, ringbuf->rd_idx);
+ if (ringbuf_io(&ringbuf->common, &hdr, sizeof(hdr)) < 0) {
+ return NULL;
+ }
+
+ pktlen = hdr.len_qwords * sizeof(uint64_t);
+ totlen = pktlen + VMBUS_PKT_TRAILER;
+ if (totlen > avail) {
+ return NULL;
+ }
+
+ msgoff = hdr.offset_qwords * sizeof(uint64_t);
+ if (msgoff > pktlen || msgoff < sizeof(hdr)) {
+ error_report("%s: malformed packet: %u %u", __func__, msgoff, pktlen);
+ return NULL;
+ }
+
+ msglen = pktlen - msgoff;
+
+ req = vmbus_alloc_req(chan, size, hdr.type, msglen, hdr.transaction_id,
+ hdr.flags & VMBUS_PACKET_FLAG_REQUEST_COMPLETION);
+
+ switch (hdr.type) {
+ case VMBUS_PACKET_DATA_USING_GPA_DIRECT:
+ desclen = msgoff - sizeof(hdr);
+ if (sgl_from_gpa_ranges(&req->sgl, chan->dev, &ringbuf->common,
+ desclen) < 0) {
+ error_report("%s: failed to convert GPA ranges to SGL", __func__);
+ goto free_req;
+ }
+ break;
+ case VMBUS_PACKET_DATA_INBAND:
+ case VMBUS_PACKET_COMP:
+ break;
+ default:
+ error_report("%s: unexpected msg type: %x", __func__, hdr.type);
+ goto free_req;
+ }
+
+ ringbuf_seek(&ringbuf->common, ringbuf->rd_idx + msgoff);
+ if (ringbuf_io(&ringbuf->common, req->msg, msglen) < 0) {
+ goto free_req;
+ }
+ ringbuf_seek(&ringbuf->common, ringbuf->rd_idx + totlen);
+
+ return req;
+free_req:
+ vmbus_free_req(req);
+ return NULL;
+}
+
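+/* Consume the packet returned by the last vmbus_channel_recv_peek() */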
+void vmbus_channel_recv_pop(VMBusChannel *chan)
+{
+ VMBusRecvRingBuf *ringbuf = &chan->recv_ringbuf;
+ ringbuf->rd_idx = ringbuf_tell(&ringbuf->common);
+}
+
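+/*
+ * Publish the packets consumed so far to the guest by updating read_index,
+ * and notify the guest if it was waiting for ringbuffer space (tracked via
+ * pending_send_sz) that this consumption has just made available.
+ */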
+ssize_t vmbus_channel_recv_done(VMBusChannel *chan)
+{
+ VMBusRecvRingBuf *ringbuf = &chan->recv_ringbuf;
+ vmbus_ring_buffer *rb;
+ uint32_t read;
+
+ read = rb_idx_delta(ringbuf->last_rd_idx, ringbuf->rd_idx,
+ ringbuf->common.len, true);
+ if (!read) {
+ return 0;
+ }
+
+ rb = ringbuf_map_hdr(&ringbuf->common);
+ if (!rb) {
+ return -EFAULT;
+ }
+
+ /* prevent reorder with the data operation and packet read */
+ smp_mb(); /* barrier pair [B] */
+ rb->read_index = ringbuf->rd_idx;
+
+ /* prevent reorder of the following pending_send_sz read */
+ smp_mb(); /* barrier pair [A] */
+
+ if (rb->interrupt_mask) {
+ goto out;
+ }
+
+ if (rb->feature_bits & VMBUS_RING_BUFFER_FEAT_PENDING_SZ) {
+ uint32_t wr_idx, wr_avail;
+ uint32_t wanted = rb->pending_send_sz;
+
+ if (!wanted) {
+ goto out;
+ }
+
+ /* prevent reorder with pending_send_sz read */
+ smp_rmb(); /* barrier pair [D] */
+ wr_idx = rb->write_index;
+
+ wr_avail = rb_idx_delta(wr_idx, ringbuf->rd_idx, ringbuf->common.len,
+ true);
+
+ /* the producer wasn't blocked on the consumer state */
+ if (wr_avail >= read + wanted) {
+ goto out;
+ }
+ /* there's not enough space for the producer to make progress */
+ if (wr_avail < wanted) {
+ goto out;
+ }
+ }
+
+ vmbus_channel_notify_guest(chan);
+out:
+ ringbuf_unmap_hdr(&ringbuf->common, rb, true);
+ ringbuf->last_rd_idx = ringbuf->rd_idx;
+ return read;
+}
+
+void vmbus_free_req(void *req)
+{
+ VMBusChanReq *r = req;
+
+ if (!req) {
+ return;
+ }
+
+ if (r->sgl.dev) {
+ qemu_sglist_destroy(&r->sgl);
+ }
+ g_free(req);
+}
+
+static const VMStateDescription vmstate_sgent = {
+ .name = "vmbus/sgentry",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(base, ScatterGatherEntry),
+ VMSTATE_UINT64(len, ScatterGatherEntry),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+typedef struct VMBusChanReqSave {
+ uint16_t chan_idx;
+ uint16_t pkt_type;
+ uint32_t msglen;
+ void *msg;
+ uint64_t transaction_id;
+ bool need_comp;
+ uint32_t num;
+ ScatterGatherEntry *sgl;
+} VMBusChanReqSave;
+
+static const VMStateDescription vmstate_vmbus_chan_req = {
+ .name = "vmbus/vmbus_chan_req",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT16(chan_idx, VMBusChanReqSave),
+ VMSTATE_UINT16(pkt_type, VMBusChanReqSave),
+ VMSTATE_UINT32(msglen, VMBusChanReqSave),
+ VMSTATE_VBUFFER_ALLOC_UINT32(msg, VMBusChanReqSave, 0, NULL, msglen),
+ VMSTATE_UINT64(transaction_id, VMBusChanReqSave),
+ VMSTATE_BOOL(need_comp, VMBusChanReqSave),
+ VMSTATE_UINT32(num, VMBusChanReqSave),
+ VMSTATE_STRUCT_VARRAY_POINTER_UINT32(sgl, VMBusChanReqSave, num,
+ vmstate_sgent, ScatterGatherEntry),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
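+/*
+ * Serialize an in-flight channel request for migration, flattening its
+ * scatter-gather list; vmbus_load_req() reconstructs it on the destination.
+ */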
+void vmbus_save_req(QEMUFile *f, VMBusChanReq *req)
+{
+ VMBusChanReqSave req_save;
+
+ req_save.chan_idx = req->chan->subchan_idx;
+ req_save.pkt_type = req->pkt_type;
+ req_save.msglen = req->msglen;
+ req_save.msg = req->msg;
+ req_save.transaction_id = req->transaction_id;
+ req_save.need_comp = req->need_comp;
+ req_save.num = req->sgl.nsg;
+ req_save.sgl = g_memdup(req->sgl.sg,
+ req_save.num * sizeof(ScatterGatherEntry));
+
+ vmstate_save_state(f, &vmstate_vmbus_chan_req, &req_save, NULL);
+
+ g_free(req_save.sgl);
+}
+
+void *vmbus_load_req(QEMUFile *f, VMBusDevice *dev, uint32_t size)
+{
+ VMBusChanReqSave req_save;
+ VMBusChanReq *req = NULL;
+ VMBusChannel *chan = NULL;
+ uint32_t i;
+
+ vmstate_load_state(f, &vmstate_vmbus_chan_req, &req_save, 0);
+
+ if (req_save.chan_idx >= dev->num_channels) {
+ error_report("%s: %u(chan_idx) > %u(num_channels)", __func__,
+ req_save.chan_idx, dev->num_channels);
+ goto out;
+ }
+ chan = &dev->channels[req_save.chan_idx];
+
+ if (vmbus_channel_reserve(chan, 0, req_save.msglen)) {
+ goto out;
+ }
+
+ req = vmbus_alloc_req(chan, size, req_save.pkt_type, req_save.msglen,
+ req_save.transaction_id, req_save.need_comp);
+ if (req_save.msglen) {
+ memcpy(req->msg, req_save.msg, req_save.msglen);
+ }
+
+ for (i = 0; i < req_save.num; i++) {
+ qemu_sglist_add(&req->sgl, req_save.sgl[i].base, req_save.sgl[i].len);
+ }
+
+out:
+ if (req_save.msglen) {
+ g_free(req_save.msg);
+ }
+ if (req_save.num) {
+ g_free(req_save.sgl);
+ }
+ return req;
+}
+
+static void channel_event_cb(EventNotifier *e)
+{
+ VMBusChannel *chan = container_of(e, VMBusChannel, notifier);
+ if (event_notifier_test_and_clear(e)) {
+ /*
+ * All receives are supposed to happen within the device worker, so
+ * bracket it with ringbuf_start/end_io on the receive ringbuffer, and
+ * potentially reuse the cached mapping throughout the worker.
+ * Can't do this for sends as they may happen outside the device
+ * worker.
+ */
+ VMBusRecvRingBuf *ringbuf = &chan->recv_ringbuf;
+ ringbuf_start_io(&ringbuf->common);
+ chan->notify_cb(chan);
+ ringbuf_end_io(&ringbuf->common);
+    }
+}
+
+static int alloc_chan_id(VMBus *vmbus)
+{
+ int ret;
+
+ ret = find_next_zero_bit(vmbus->chanid_bitmap, VMBUS_CHANID_COUNT, 0);
+ if (ret == VMBUS_CHANID_COUNT) {
+ return -ENOMEM;
+ }
+ return ret + VMBUS_FIRST_CHANID;
+}
+
+static int register_chan_id(VMBusChannel *chan)
+{
+ return test_and_set_bit(chan->id - VMBUS_FIRST_CHANID,
+ chan->vmbus->chanid_bitmap) ? -EEXIST : 0;
+}
+
+static void unregister_chan_id(VMBusChannel *chan)
+{
+ clear_bit(chan->id - VMBUS_FIRST_CHANID, chan->vmbus->chanid_bitmap);
+}
+
+static uint32_t chan_connection_id(VMBusChannel *chan)
+{
+ return VMBUS_CHAN_CONNECTION_OFFSET + chan->id;
+}
+
+static void init_channel(VMBus *vmbus, VMBusDevice *dev, VMBusDeviceClass *vdc,
+ VMBusChannel *chan, uint16_t idx, Error **errp)
+{
+ int res;
+
+ chan->dev = dev;
+ chan->notify_cb = vdc->chan_notify_cb;
+ chan->subchan_idx = idx;
+ chan->vmbus = vmbus;
+
+ res = alloc_chan_id(vmbus);
+ if (res < 0) {
+ error_setg(errp, "no spare channel id");
+ return;
+ }
+ chan->id = res;
+ register_chan_id(chan);
+
+ /*
+ * The guest drivers depend on the device subchannels (idx #1+) to be
+ * offered after the primary channel (idx #0) of that device. To ensure
+ * that, record the channels on the channel list in the order they appear
+ * within the device.
+ */
+ QTAILQ_INSERT_TAIL(&vmbus->channel_list, chan, link);
+}
+
+static void deinit_channel(VMBusChannel *chan)
+{
+ assert(chan->state == VMCHAN_INIT);
+ QTAILQ_REMOVE(&chan->vmbus->channel_list, chan, link);
+ unregister_chan_id(chan);
+}
+
+static void create_channels(VMBus *vmbus, VMBusDevice *dev, Error **errp)
+{
+ uint16_t i;
+ VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(dev);
+ Error *err = NULL;
+
+ dev->num_channels = vdc->num_channels ? vdc->num_channels(dev) : 1;
+ if (dev->num_channels < 1) {
+ error_setg(&err, "invalid #channels: %u", dev->num_channels);
+ goto error_out;
+ }
+
+ dev->channels = g_new0(VMBusChannel, dev->num_channels);
+ for (i = 0; i < dev->num_channels; i++) {
+ init_channel(vmbus, dev, vdc, &dev->channels[i], i, &err);
+ if (err) {
+ goto err_init;
+ }
+ }
+
+ return;
+
+err_init:
+ while (i--) {
+ deinit_channel(&dev->channels[i]);
+ }
+error_out:
+ error_propagate(errp, err);
+}
+
+static void free_channels(VMBusDevice *dev)
+{
+ uint16_t i;
+ for (i = 0; i < dev->num_channels; i++) {
+ deinit_channel(&dev->channels[i]);
+ }
+ g_free(dev->channels);
+}
+
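+/*
+ * Return a SINT route for notifying @vp_index, reusing the bus-wide route
+ * or that of an already open channel targeting the same vCPU where
+ * possible, and creating a new one otherwise.
+ */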
+static HvSintRoute *make_sint_route(VMBus *vmbus, uint32_t vp_index)
+{
+ VMBusChannel *chan;
+
+ if (vp_index == vmbus->target_vp) {
+ hyperv_sint_route_ref(vmbus->sint_route);
+ return vmbus->sint_route;
+ }
+
+ QTAILQ_FOREACH(chan, &vmbus->channel_list, link) {
+ if (chan->target_vp == vp_index && vmbus_channel_is_open(chan)) {
+ hyperv_sint_route_ref(chan->notify_route);
+ return chan->notify_route;
+ }
+ }
+
+ return hyperv_sint_route_new(vp_index, VMBUS_SINT, NULL, NULL);
+}
+
+static void open_channel(VMBusChannel *chan)
+{
+ VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(chan->dev);
+
+ chan->gpadl = vmbus_get_gpadl(chan, chan->ringbuf_gpadl);
+ if (!chan->gpadl) {
+ return;
+ }
+
+ if (ringbufs_init(chan)) {
+ goto put_gpadl;
+ }
+
+ if (event_notifier_init(&chan->notifier, 0)) {
+ goto put_gpadl;
+ }
+
+ event_notifier_set_handler(&chan->notifier, channel_event_cb);
+
+ if (hyperv_set_event_flag_handler(chan_connection_id(chan),
+ &chan->notifier)) {
+ goto cleanup_notifier;
+ }
+
+ chan->notify_route = make_sint_route(chan->vmbus, chan->target_vp);
+ if (!chan->notify_route) {
+ goto clear_event_flag_handler;
+ }
+
+ if (vdc->open_channel && vdc->open_channel(chan)) {
+ goto unref_sint_route;
+ }
+
+ chan->is_open = true;
+ return;
+
+unref_sint_route:
+ hyperv_sint_route_unref(chan->notify_route);
+clear_event_flag_handler:
+ hyperv_set_event_flag_handler(chan_connection_id(chan), NULL);
+cleanup_notifier:
+ event_notifier_set_handler(&chan->notifier, NULL);
+ event_notifier_cleanup(&chan->notifier);
+put_gpadl:
+ vmbus_put_gpadl(chan->gpadl);
+}
+
+static void close_channel(VMBusChannel *chan)
+{
+ VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(chan->dev);
+
+ if (!chan->is_open) {
+ return;
+ }
+
+ if (vdc->close_channel) {
+ vdc->close_channel(chan);
+ }
+
+ hyperv_sint_route_unref(chan->notify_route);
+ hyperv_set_event_flag_handler(chan_connection_id(chan), NULL);
+ event_notifier_set_handler(&chan->notifier, NULL);
+ event_notifier_cleanup(&chan->notifier);
+ vmbus_put_gpadl(chan->gpadl);
+ chan->is_open = false;
+}
+
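+/*
+ * Re-register the channel id that arrived in the migration stream; the ids
+ * allocated at realize time were released in vmbus_pre_load().
+ */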
+static int channel_post_load(void *opaque, int version_id)
+{
+ VMBusChannel *chan = opaque;
+
+ return register_chan_id(chan);
+}
+
+static const VMStateDescription vmstate_channel = {
+ .name = "vmbus/channel",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .post_load = channel_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(id, VMBusChannel),
+ VMSTATE_UINT16(subchan_idx, VMBusChannel),
+ VMSTATE_UINT32(open_id, VMBusChannel),
+ VMSTATE_UINT32(target_vp, VMBusChannel),
+ VMSTATE_UINT32(ringbuf_gpadl, VMBusChannel),
+ VMSTATE_UINT32(ringbuf_send_offset, VMBusChannel),
+ VMSTATE_UINT8(offer_state, VMBusChannel),
+ VMSTATE_UINT8(state, VMBusChannel),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static VMBusChannel *find_channel(VMBus *vmbus, uint32_t id)
+{
+ VMBusChannel *chan;
+ QTAILQ_FOREACH(chan, &vmbus->channel_list, link) {
+ if (chan->id == id) {
+ return chan;
+ }
+ }
+ return NULL;
+}
+
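+/*
+ * Append a message to the fixed-size circular rx queue.  Processing is
+ * (re)scheduled only on the empty-to-non-empty transition; otherwise the
+ * already scheduled run will pick the message up.
+ */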
+static int enqueue_incoming_message(VMBus *vmbus,
+ const struct hyperv_post_message_input *msg)
+{
+ int ret = 0;
+ uint8_t idx, prev_size;
+
+ qemu_mutex_lock(&vmbus->rx_queue_lock);
+
+ if (vmbus->rx_queue_size == HV_MSG_QUEUE_LEN) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ prev_size = vmbus->rx_queue_size;
+ idx = (vmbus->rx_queue_head + vmbus->rx_queue_size) % HV_MSG_QUEUE_LEN;
+ memcpy(&vmbus->rx_queue[idx], msg, sizeof(*msg));
+ vmbus->rx_queue_size++;
+
+ /* only need to resched if the queue was empty before */
+ if (!prev_size) {
+ vmbus_resched(vmbus);
+ }
+out:
+ qemu_mutex_unlock(&vmbus->rx_queue_lock);
+ return ret;
+}
+
+static uint16_t vmbus_recv_message(const struct hyperv_post_message_input *msg,
+ void *data)
+{
+ VMBus *vmbus = data;
+ struct vmbus_message_header *vmbus_msg;
+
+ if (msg->message_type != HV_MESSAGE_VMBUS) {
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ }
+
+ if (msg->payload_size < sizeof(struct vmbus_message_header)) {
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ }
+
+ vmbus_msg = (struct vmbus_message_header *)msg->payload;
+
+ trace_vmbus_recv_message(vmbus_msg->message_type, msg->payload_size);
+
+ if (vmbus_msg->message_type == VMBUS_MSG_INVALID ||
+ vmbus_msg->message_type >= VMBUS_MSG_COUNT) {
+ error_report("vmbus: unknown message type %#x",
+ vmbus_msg->message_type);
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ }
+
+ if (enqueue_incoming_message(vmbus, msg)) {
+ return HV_STATUS_INSUFFICIENT_BUFFERS;
+ }
+ return HV_STATUS_SUCCESS;
+}
+
+static bool vmbus_initialized(VMBus *vmbus)
+{
+ return vmbus->version > 0 && vmbus->version <= VMBUS_VERSION_CURRENT;
+}
+
+static void vmbus_reset_all(VMBus *vmbus)
+{
+ qbus_reset_all(BUS(vmbus));
+}
+
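+/*
+ * Wrap a vmbus message into a Hyper-V message and post it on the SINT
+ * route.  Both 0 and -EAGAIN leave the message pending: vmbus_msg_cb()
+ * will either complete it or retry the delivery.  Any other error is
+ * fatal to the vmbus state and triggers a full reset.
+ */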
+static void post_msg(VMBus *vmbus, void *msgdata, uint32_t msglen)
+{
+ int ret;
+ struct hyperv_message msg = {
+ .header.message_type = HV_MESSAGE_VMBUS,
+ };
+
+ assert(!vmbus->msg_in_progress);
+ assert(msglen <= sizeof(msg.payload));
+ assert(msglen >= sizeof(struct vmbus_message_header));
+
+ vmbus->msg_in_progress = true;
+
+ trace_vmbus_post_msg(((struct vmbus_message_header *)msgdata)->message_type,
+ msglen);
+
+ memcpy(msg.payload, msgdata, msglen);
+ msg.header.payload_size = ROUND_UP(msglen, VMBUS_MESSAGE_SIZE_ALIGN);
+
+ ret = hyperv_post_msg(vmbus->sint_route, &msg);
+ if (ret == 0 || ret == -EAGAIN) {
+ return;
+ }
+
+ error_report("message delivery fatal failure: %d; aborting vmbus", ret);
+ vmbus_reset_all(vmbus);
+}
+
+static int vmbus_init(VMBus *vmbus)
+{
+ if (vmbus->target_vp != (uint32_t)-1) {
+ vmbus->sint_route = hyperv_sint_route_new(vmbus->target_vp, VMBUS_SINT,
+ vmbus_msg_cb, vmbus);
+ if (!vmbus->sint_route) {
+ error_report("failed to set up SINT route");
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+static void vmbus_deinit(VMBus *vmbus)
+{
+ VMBusGpadl *gpadl, *tmp_gpadl;
+ VMBusChannel *chan;
+
+ QTAILQ_FOREACH_SAFE(gpadl, &vmbus->gpadl_list, link, tmp_gpadl) {
+ if (gpadl->state == VMGPADL_TORNDOWN) {
+ continue;
+ }
+ vmbus_put_gpadl(gpadl);
+ }
+
+ QTAILQ_FOREACH(chan, &vmbus->channel_list, link) {
+ chan->offer_state = VMOFFER_INIT;
+ }
+
+ hyperv_sint_route_unref(vmbus->sint_route);
+ vmbus->sint_route = NULL;
+ vmbus->int_page_gpa = 0;
+ vmbus->target_vp = (uint32_t)-1;
+ vmbus->version = 0;
+ vmbus->state = VMBUS_LISTEN;
+ vmbus->msg_in_progress = false;
+}
+
+static void handle_initiate_contact(VMBus *vmbus,
+ vmbus_message_initiate_contact *msg,
+ uint32_t msglen)
+{
+ if (msglen < sizeof(*msg)) {
+ return;
+ }
+
+ trace_vmbus_initiate_contact(msg->version_requested >> 16,
+ msg->version_requested & 0xffff,
+ msg->target_vcpu, msg->monitor_page1,
+ msg->monitor_page2, msg->interrupt_page);
+
+ /*
+ * Reset vmbus on INITIATE_CONTACT regardless of its previous state.
+     * Useful, in particular, with a vmbus-aware BIOS that can't shut vmbus
+     * down before handing over to the OS loader.
+ */
+ vmbus_reset_all(vmbus);
+
+ vmbus->target_vp = msg->target_vcpu;
+ vmbus->version = msg->version_requested;
+ if (vmbus->version < VMBUS_VERSION_WIN8) {
+        /* Linux passes the interrupt page even when it doesn't need it */
+ vmbus->int_page_gpa = msg->interrupt_page;
+ }
+ vmbus->state = VMBUS_HANDSHAKE;
+
+ if (vmbus_init(vmbus)) {
+ error_report("failed to init vmbus; aborting");
+ vmbus_deinit(vmbus);
+ return;
+ }
+}
+
+static void send_handshake(VMBus *vmbus)
+{
+ struct vmbus_message_version_response msg = {
+ .header.message_type = VMBUS_MSG_VERSION_RESPONSE,
+ .version_supported = vmbus_initialized(vmbus),
+ };
+
+ post_msg(vmbus, &msg, sizeof(msg));
+}
+
+static void handle_request_offers(VMBus *vmbus, void *msgdata, uint32_t msglen)
+{
+ VMBusChannel *chan;
+
+ if (!vmbus_initialized(vmbus)) {
+ return;
+ }
+
+ QTAILQ_FOREACH(chan, &vmbus->channel_list, link) {
+ if (chan->offer_state == VMOFFER_INIT) {
+ chan->offer_state = VMOFFER_SENDING;
+ break;
+ }
+ }
+
+ vmbus->state = VMBUS_OFFER;
+}
+
+static void send_offer(VMBus *vmbus)
+{
+ VMBusChannel *chan;
+ struct vmbus_message_header alloffers_msg = {
+ .message_type = VMBUS_MSG_ALLOFFERS_DELIVERED,
+ };
+
+ QTAILQ_FOREACH(chan, &vmbus->channel_list, link) {
+ if (chan->offer_state == VMOFFER_SENDING) {
+ VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(chan->dev);
+ /* Hyper-V wants LE GUIDs */
+ QemuUUID classid = qemu_uuid_bswap(vdc->classid);
+ QemuUUID instanceid = qemu_uuid_bswap(chan->dev->instanceid);
+ struct vmbus_message_offer_channel msg = {
+ .header.message_type = VMBUS_MSG_OFFERCHANNEL,
+ .child_relid = chan->id,
+ .connection_id = chan_connection_id(chan),
+ .channel_flags = vdc->channel_flags,
+ .mmio_size_mb = vdc->mmio_size_mb,
+ .sub_channel_index = vmbus_channel_idx(chan),
+ .interrupt_flags = VMBUS_OFFER_INTERRUPT_DEDICATED,
+ };
+
+ memcpy(msg.type_uuid, &classid, sizeof(classid));
+ memcpy(msg.instance_uuid, &instanceid, sizeof(instanceid));
+
+ trace_vmbus_send_offer(chan->id, chan->dev);
+
+ post_msg(vmbus, &msg, sizeof(msg));
+ return;
+ }
+ }
+
+ /* no more offers, send terminator message */
+ trace_vmbus_terminate_offers();
+ post_msg(vmbus, &alloffers_msg, sizeof(alloffers_msg));
+}
+
+static bool complete_offer(VMBus *vmbus)
+{
+ VMBusChannel *chan;
+
+ QTAILQ_FOREACH(chan, &vmbus->channel_list, link) {
+ if (chan->offer_state == VMOFFER_SENDING) {
+ chan->offer_state = VMOFFER_SENT;
+ goto next_offer;
+ }
+ }
+ /*
+ * no transitioning channels found so this is completing the terminator
+ * message, and vmbus can move to the next state
+ */
+ return true;
+
+next_offer:
+ /* try to mark another channel for offering */
+ QTAILQ_FOREACH(chan, &vmbus->channel_list, link) {
+ if (chan->offer_state == VMOFFER_INIT) {
+ chan->offer_state = VMOFFER_SENDING;
+ break;
+ }
+ }
+ /*
+     * if an offer has been sent, there are more offers or the terminator
+     * still left to send, so there is no state transition for vmbus
+ */
+ return false;
+}
+
+static void handle_gpadl_header(VMBus *vmbus, vmbus_message_gpadl_header *msg,
+ uint32_t msglen)
+{
+ VMBusGpadl *gpadl;
+ uint32_t num_gfns, i;
+
+ /* must include at least one gpa range */
+ if (msglen < sizeof(*msg) + sizeof(msg->range[0]) ||
+ !vmbus_initialized(vmbus)) {
+ return;
+ }
+
+ num_gfns = (msg->range_buflen - msg->rangecount * sizeof(msg->range[0])) /
+ sizeof(msg->range[0].pfn_array[0]);
+
+ trace_vmbus_gpadl_header(msg->gpadl_id, num_gfns);
+
+ /*
+     * In theory the GPADL_HEADER message can define a GPADL with multiple
+     * GPA ranges, each with arbitrary size and alignment. In practice,
+     * however, only single-range page-aligned GPADLs have been observed, so
+     * just ignore anything else and simplify things greatly.
+ */
+ if (msg->rangecount != 1 || msg->range[0].byte_offset ||
+ (msg->range[0].byte_count != (num_gfns << TARGET_PAGE_BITS))) {
+ return;
+ }
+
+ /* ignore requests to create already existing GPADLs */
+ if (find_gpadl(vmbus, msg->gpadl_id)) {
+ return;
+ }
+
+ gpadl = create_gpadl(vmbus, msg->gpadl_id, msg->child_relid, num_gfns);
+
+ for (i = 0; i < num_gfns &&
+ (void *)&msg->range[0].pfn_array[i + 1] <= (void *)msg + msglen;
+ i++) {
+ gpadl->gfns[gpadl->seen_gfns++] = msg->range[0].pfn_array[i];
+ }
+
+ if (gpadl_full(gpadl)) {
+ vmbus->state = VMBUS_CREATE_GPADL;
+ }
+}
+
+static void handle_gpadl_body(VMBus *vmbus, vmbus_message_gpadl_body *msg,
+ uint32_t msglen)
+{
+ VMBusGpadl *gpadl;
+ uint32_t num_gfns_left, i;
+
+ if (msglen < sizeof(*msg) || !vmbus_initialized(vmbus)) {
+ return;
+ }
+
+ trace_vmbus_gpadl_body(msg->gpadl_id);
+
+ gpadl = find_gpadl(vmbus, msg->gpadl_id);
+ if (!gpadl) {
+ return;
+ }
+
+ num_gfns_left = gpadl->num_gfns - gpadl->seen_gfns;
+ assert(num_gfns_left);
+
+ for (i = 0; i < num_gfns_left &&
+ (void *)&msg->pfn_array[i + 1] <= (void *)msg + msglen; i++) {
+ gpadl->gfns[gpadl->seen_gfns++] = msg->pfn_array[i];
+ }
+
+ if (gpadl_full(gpadl)) {
+ vmbus->state = VMBUS_CREATE_GPADL;
+ }
+}
+
+static void send_create_gpadl(VMBus *vmbus)
+{
+ VMBusGpadl *gpadl;
+
+ QTAILQ_FOREACH(gpadl, &vmbus->gpadl_list, link) {
+ if (gpadl_full(gpadl) && gpadl->state == VMGPADL_INIT) {
+ struct vmbus_message_gpadl_created msg = {
+ .header.message_type = VMBUS_MSG_GPADL_CREATED,
+ .gpadl_id = gpadl->id,
+ .child_relid = gpadl->child_relid,
+ };
+
+ trace_vmbus_gpadl_created(gpadl->id);
+ post_msg(vmbus, &msg, sizeof(msg));
+ return;
+ }
+ }
+
+ assert(false);
+}
+
+static bool complete_create_gpadl(VMBus *vmbus)
+{
+ VMBusGpadl *gpadl;
+
+ QTAILQ_FOREACH(gpadl, &vmbus->gpadl_list, link) {
+ if (gpadl_full(gpadl) && gpadl->state == VMGPADL_INIT) {
+ gpadl->state = VMGPADL_ALIVE;
+
+ return true;
+ }
+ }
+
+ assert(false);
+ return false;
+}
+
+static void handle_gpadl_teardown(VMBus *vmbus,
+ vmbus_message_gpadl_teardown *msg,
+ uint32_t msglen)
+{
+ VMBusGpadl *gpadl;
+
+ if (msglen < sizeof(*msg) || !vmbus_initialized(vmbus)) {
+ return;
+ }
+
+ trace_vmbus_gpadl_teardown(msg->gpadl_id);
+
+ gpadl = find_gpadl(vmbus, msg->gpadl_id);
+ if (!gpadl || gpadl->state == VMGPADL_TORNDOWN) {
+ return;
+ }
+
+ gpadl->state = VMGPADL_TEARINGDOWN;
+ vmbus->state = VMBUS_TEARDOWN_GPADL;
+}
+
+static void send_teardown_gpadl(VMBus *vmbus)
+{
+ VMBusGpadl *gpadl;
+
+ QTAILQ_FOREACH(gpadl, &vmbus->gpadl_list, link) {
+ if (gpadl->state == VMGPADL_TEARINGDOWN) {
+ struct vmbus_message_gpadl_torndown msg = {
+ .header.message_type = VMBUS_MSG_GPADL_TORNDOWN,
+ .gpadl_id = gpadl->id,
+ };
+
+ trace_vmbus_gpadl_torndown(gpadl->id);
+ post_msg(vmbus, &msg, sizeof(msg));
+ return;
+ }
+ }
+
+ assert(false);
+}
+
+static bool complete_teardown_gpadl(VMBus *vmbus)
+{
+ VMBusGpadl *gpadl;
+
+ QTAILQ_FOREACH(gpadl, &vmbus->gpadl_list, link) {
+ if (gpadl->state == VMGPADL_TEARINGDOWN) {
+ gpadl->state = VMGPADL_TORNDOWN;
+ vmbus_put_gpadl(gpadl);
+ return true;
+ }
+ }
+
+ assert(false);
+ return false;
+}
+
+static void handle_open_channel(VMBus *vmbus, vmbus_message_open_channel *msg,
+ uint32_t msglen)
+{
+ VMBusChannel *chan;
+
+ if (msglen < sizeof(*msg) || !vmbus_initialized(vmbus)) {
+ return;
+ }
+
+ trace_vmbus_open_channel(msg->child_relid, msg->ring_buffer_gpadl_id,
+ msg->target_vp);
+ chan = find_channel(vmbus, msg->child_relid);
+ if (!chan || chan->state != VMCHAN_INIT) {
+ return;
+ }
+
+ chan->ringbuf_gpadl = msg->ring_buffer_gpadl_id;
+ chan->ringbuf_send_offset = msg->ring_buffer_offset;
+ chan->target_vp = msg->target_vp;
+ chan->open_id = msg->open_id;
+
+ open_channel(chan);
+
+ chan->state = VMCHAN_OPENING;
+ vmbus->state = VMBUS_OPEN_CHANNEL;
+}
+
+static void send_open_channel(VMBus *vmbus)
+{
+ VMBusChannel *chan;
+
+ QTAILQ_FOREACH(chan, &vmbus->channel_list, link) {
+ if (chan->state == VMCHAN_OPENING) {
+ struct vmbus_message_open_result msg = {
+ .header.message_type = VMBUS_MSG_OPENCHANNEL_RESULT,
+ .child_relid = chan->id,
+ .open_id = chan->open_id,
+ .status = !vmbus_channel_is_open(chan),
+ };
+
+ trace_vmbus_channel_open(chan->id, msg.status);
+ post_msg(vmbus, &msg, sizeof(msg));
+ return;
+ }
+ }
+
+ assert(false);
+}
+
+static bool complete_open_channel(VMBus *vmbus)
+{
+ VMBusChannel *chan;
+
+ QTAILQ_FOREACH(chan, &vmbus->channel_list, link) {
+ if (chan->state == VMCHAN_OPENING) {
+ if (vmbus_channel_is_open(chan)) {
+ chan->state = VMCHAN_OPEN;
+ /*
+ * simulate guest notification of ringbuffer space made
+ * available, for the channel protocols where the host
+ * initiates the communication
+ */
+ vmbus_channel_notify_host(chan);
+ } else {
+ chan->state = VMCHAN_INIT;
+ }
+ return true;
+ }
+ }
+
+ assert(false);
+ return false;
+}
+
+static void vdev_reset_on_close(VMBusDevice *vdev)
+{
+ uint16_t i;
+
+ for (i = 0; i < vdev->num_channels; i++) {
+ if (vmbus_channel_is_open(&vdev->channels[i])) {
+ return;
+ }
+ }
+
+ /* all channels closed -- reset device */
+ qdev_reset_all(DEVICE(vdev));
+}
+
+static void handle_close_channel(VMBus *vmbus, vmbus_message_close_channel *msg,
+ uint32_t msglen)
+{
+ VMBusChannel *chan;
+
+ if (msglen < sizeof(*msg) || !vmbus_initialized(vmbus)) {
+ return;
+ }
+
+ trace_vmbus_close_channel(msg->child_relid);
+
+ chan = find_channel(vmbus, msg->child_relid);
+ if (!chan) {
+ return;
+ }
+
+ close_channel(chan);
+ chan->state = VMCHAN_INIT;
+
+ vdev_reset_on_close(chan->dev);
+}
+
+static void handle_unload(VMBus *vmbus, void *msg, uint32_t msglen)
+{
+ vmbus->state = VMBUS_UNLOAD;
+}
+
+static void send_unload(VMBus *vmbus)
+{
+ vmbus_message_header msg = {
+ .message_type = VMBUS_MSG_UNLOAD_RESPONSE,
+ };
+
+ qemu_mutex_lock(&vmbus->rx_queue_lock);
+ vmbus->rx_queue_size = 0;
+ qemu_mutex_unlock(&vmbus->rx_queue_lock);
+
+ post_msg(vmbus, &msg, sizeof(msg));
+}
+
+static bool complete_unload(VMBus *vmbus)
+{
+ vmbus_reset_all(vmbus);
+ return true;
+}
+
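+/*
+ * Dequeue one message from the rx queue and dispatch it to the matching
+ * handler.  Note that the queue slot is consumed even for messages too
+ * short to carry a valid header.
+ */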
+static void process_message(VMBus *vmbus)
+{
+ struct hyperv_post_message_input *hv_msg;
+ struct vmbus_message_header *msg;
+ void *msgdata;
+ uint32_t msglen;
+
+ qemu_mutex_lock(&vmbus->rx_queue_lock);
+
+ if (!vmbus->rx_queue_size) {
+ goto unlock;
+ }
+
+ hv_msg = &vmbus->rx_queue[vmbus->rx_queue_head];
+ msglen = hv_msg->payload_size;
+ if (msglen < sizeof(*msg)) {
+ goto out;
+ }
+ msgdata = hv_msg->payload;
+ msg = (struct vmbus_message_header *)msgdata;
+
+ trace_vmbus_process_incoming_message(msg->message_type);
+
+ switch (msg->message_type) {
+ case VMBUS_MSG_INITIATE_CONTACT:
+ handle_initiate_contact(vmbus, msgdata, msglen);
+ break;
+ case VMBUS_MSG_REQUESTOFFERS:
+ handle_request_offers(vmbus, msgdata, msglen);
+ break;
+ case VMBUS_MSG_GPADL_HEADER:
+ handle_gpadl_header(vmbus, msgdata, msglen);
+ break;
+ case VMBUS_MSG_GPADL_BODY:
+ handle_gpadl_body(vmbus, msgdata, msglen);
+ break;
+ case VMBUS_MSG_GPADL_TEARDOWN:
+ handle_gpadl_teardown(vmbus, msgdata, msglen);
+ break;
+ case VMBUS_MSG_OPENCHANNEL:
+ handle_open_channel(vmbus, msgdata, msglen);
+ break;
+ case VMBUS_MSG_CLOSECHANNEL:
+ handle_close_channel(vmbus, msgdata, msglen);
+ break;
+ case VMBUS_MSG_UNLOAD:
+ handle_unload(vmbus, msgdata, msglen);
+ break;
+ default:
+ error_report("unknown message type %#x", msg->message_type);
+ break;
+ }
+
+out:
+ vmbus->rx_queue_size--;
+ vmbus->rx_queue_head++;
+ vmbus->rx_queue_head %= HV_MSG_QUEUE_LEN;
+
+ vmbus_resched(vmbus);
+unlock:
+ qemu_mutex_unlock(&vmbus->rx_queue_lock);
+}
+
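+/*
+ * Per-state handlers: .run initiates the state's action (typically posting
+ * a message to the guest), and the optional .complete is invoked from
+ * vmbus_msg_cb() once delivery succeeds, returning true when vmbus should
+ * fall back to VMBUS_LISTEN.
+ */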
+static const struct {
+ void (*run)(VMBus *vmbus);
+ bool (*complete)(VMBus *vmbus);
+} state_runner[] = {
+ [VMBUS_LISTEN] = {process_message, NULL},
+ [VMBUS_HANDSHAKE] = {send_handshake, NULL},
+ [VMBUS_OFFER] = {send_offer, complete_offer},
+ [VMBUS_CREATE_GPADL] = {send_create_gpadl, complete_create_gpadl},
+ [VMBUS_TEARDOWN_GPADL] = {send_teardown_gpadl, complete_teardown_gpadl},
+ [VMBUS_OPEN_CHANNEL] = {send_open_channel, complete_open_channel},
+ [VMBUS_UNLOAD] = {send_unload, complete_unload},
+};
+
+static void vmbus_do_run(VMBus *vmbus)
+{
+ if (vmbus->msg_in_progress) {
+ return;
+ }
+
+ assert(vmbus->state < VMBUS_STATE_MAX);
+ assert(state_runner[vmbus->state].run);
+ state_runner[vmbus->state].run(vmbus);
+}
+
+static void vmbus_run(void *opaque)
+{
+ VMBus *vmbus = opaque;
+
+ /* make sure no recursion happens (e.g. due to recursive aio_poll()) */
+ if (vmbus->in_progress) {
+ return;
+ }
+
+ vmbus->in_progress = true;
+ /*
+ * FIXME: if vmbus_resched() is called from within vmbus_do_run(), it
+ * should go *after* the code that can result in aio_poll; otherwise
+ * reschedules can be missed. No idea how to enforce that.
+ */
+ vmbus_do_run(vmbus);
+ vmbus->in_progress = false;
+}
+
+static void vmbus_msg_cb(void *data, int status)
+{
+ VMBus *vmbus = data;
+ bool (*complete)(VMBus *vmbus);
+
+ assert(vmbus->msg_in_progress);
+
+ trace_vmbus_msg_cb(status);
+
+ if (status == -EAGAIN) {
+ goto out;
+ }
+ if (status) {
+ error_report("message delivery fatal failure: %d; aborting vmbus",
+ status);
+ vmbus_reset_all(vmbus);
+ return;
+ }
+
+ assert(vmbus->state < VMBUS_STATE_MAX);
+ complete = state_runner[vmbus->state].complete;
+ if (!complete || complete(vmbus)) {
+ vmbus->state = VMBUS_LISTEN;
+ }
+out:
+ vmbus->msg_in_progress = false;
+ vmbus_resched(vmbus);
+}
+
+static void vmbus_resched(VMBus *vmbus)
+{
+ aio_bh_schedule_oneshot(qemu_get_aio_context(), vmbus_run, vmbus);
+}
+
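+/*
+ * Handle guest-to-host notifications signalled through the legacy
+ * (pre-Win8) interrupt page: scan its guest-to-host half and notify every
+ * open channel whose bit is set there.
+ */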
+static void vmbus_signal_event(EventNotifier *e)
+{
+ VMBusChannel *chan;
+ VMBus *vmbus = container_of(e, VMBus, notifier);
+ unsigned long *int_map;
+ hwaddr addr, len;
+ bool is_dirty = false;
+
+ if (!event_notifier_test_and_clear(e)) {
+ return;
+ }
+
+ trace_vmbus_signal_event();
+
+ if (!vmbus->int_page_gpa) {
+ return;
+ }
+
+ addr = vmbus->int_page_gpa + TARGET_PAGE_SIZE / 2;
+ len = TARGET_PAGE_SIZE / 2;
+ int_map = cpu_physical_memory_map(addr, &len, 1);
+ if (len != TARGET_PAGE_SIZE / 2) {
+ goto unmap;
+ }
+
+ QTAILQ_FOREACH(chan, &vmbus->channel_list, link) {
+ if (bitmap_test_and_clear_atomic(int_map, chan->id, 1)) {
+ if (!vmbus_channel_is_open(chan)) {
+ continue;
+ }
+ vmbus_channel_notify_host(chan);
+ is_dirty = true;
+ }
+ }
+
+unmap:
+ cpu_physical_memory_unmap(int_map, len, 1, is_dirty);
+}
+
+static void vmbus_dev_realize(DeviceState *dev, Error **errp)
+{
+ VMBusDevice *vdev = VMBUS_DEVICE(dev);
+ VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(vdev);
+ VMBus *vmbus = VMBUS(qdev_get_parent_bus(dev));
+ BusChild *child;
+ Error *err = NULL;
+ char idstr[UUID_FMT_LEN + 1];
+
+ assert(!qemu_uuid_is_null(&vdev->instanceid));
+
+ /* Check for instance id collision for this class id */
+ QTAILQ_FOREACH(child, &BUS(vmbus)->children, sibling) {
+ VMBusDevice *child_dev = VMBUS_DEVICE(child->child);
+
+ if (child_dev == vdev) {
+ continue;
+ }
+
+ if (qemu_uuid_is_equal(&child_dev->instanceid, &vdev->instanceid)) {
+ qemu_uuid_unparse(&vdev->instanceid, idstr);
+ error_setg(&err, "duplicate vmbus device instance id %s", idstr);
+ goto error_out;
+ }
+ }
+
+ vdev->dma_as = &address_space_memory;
+
+ create_channels(vmbus, vdev, &err);
+ if (err) {
+ goto error_out;
+ }
+
+ if (vdc->vmdev_realize) {
+ vdc->vmdev_realize(vdev, &err);
+ if (err) {
+ goto err_vdc_realize;
+ }
+ }
+ return;
+
+err_vdc_realize:
+ free_channels(vdev);
+error_out:
+ error_propagate(errp, err);
+}
+
+static void vmbus_dev_reset(DeviceState *dev)
+{
+ uint16_t i;
+ VMBusDevice *vdev = VMBUS_DEVICE(dev);
+ VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(vdev);
+
+ if (vdev->channels) {
+ for (i = 0; i < vdev->num_channels; i++) {
+ VMBusChannel *chan = &vdev->channels[i];
+ close_channel(chan);
+ chan->state = VMCHAN_INIT;
+ }
+ }
+
+ if (vdc->vmdev_reset) {
+ vdc->vmdev_reset(vdev);
+ }
+}
+
+static void vmbus_dev_unrealize(DeviceState *dev)
+{
+ VMBusDevice *vdev = VMBUS_DEVICE(dev);
+ VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(vdev);
+
+ if (vdc->vmdev_unrealize) {
+ vdc->vmdev_unrealize(vdev);
+ }
+ free_channels(vdev);
+}
+
+static void vmbus_dev_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *kdev = DEVICE_CLASS(klass);
+ kdev->bus_type = TYPE_VMBUS;
+ kdev->realize = vmbus_dev_realize;
+ kdev->unrealize = vmbus_dev_unrealize;
+ kdev->reset = vmbus_dev_reset;
+}
+
+static Property vmbus_dev_instanceid =
+ DEFINE_PROP_UUID("instanceid", VMBusDevice, instanceid);
+
+static void vmbus_dev_instance_init(Object *obj)
+{
+ VMBusDevice *vdev = VMBUS_DEVICE(obj);
+ VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(vdev);
+
+ if (!qemu_uuid_is_null(&vdc->instanceid)) {
+ /* Class wants to only have a single instance with a fixed UUID */
+ vdev->instanceid = vdc->instanceid;
+ } else {
+ qdev_property_add_static(DEVICE(vdev), &vmbus_dev_instanceid);
+ }
+}
+
+const VMStateDescription vmstate_vmbus_dev = {
+ .name = TYPE_VMBUS_DEVICE,
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(instanceid.data, VMBusDevice, 16),
+ VMSTATE_UINT16(num_channels, VMBusDevice),
+ VMSTATE_STRUCT_VARRAY_POINTER_UINT16(channels, VMBusDevice,
+ num_channels, vmstate_channel,
+ VMBusChannel),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/* vmbus generic device base */
+static const TypeInfo vmbus_dev_type_info = {
+ .name = TYPE_VMBUS_DEVICE,
+ .parent = TYPE_DEVICE,
+ .abstract = true,
+ .instance_size = sizeof(VMBusDevice),
+ .class_size = sizeof(VMBusDeviceClass),
+ .class_init = vmbus_dev_class_init,
+ .instance_init = vmbus_dev_instance_init,
+};
+
+static void vmbus_realize(BusState *bus, Error **errp)
+{
+ int ret = 0;
+ Error *local_err = NULL;
+ VMBus *vmbus = VMBUS(bus);
+
+ qemu_mutex_init(&vmbus->rx_queue_lock);
+
+ QTAILQ_INIT(&vmbus->gpadl_list);
+ QTAILQ_INIT(&vmbus->channel_list);
+
+ ret = hyperv_set_msg_handler(VMBUS_MESSAGE_CONNECTION_ID,
+ vmbus_recv_message, vmbus);
+ if (ret != 0) {
+ error_setg(&local_err, "hyperv set message handler failed: %d", ret);
+ goto error_out;
+ }
+
+ ret = event_notifier_init(&vmbus->notifier, 0);
+ if (ret != 0) {
+ error_setg(&local_err, "event notifier failed to init with %d", ret);
+ goto remove_msg_handler;
+ }
+
+ event_notifier_set_handler(&vmbus->notifier, vmbus_signal_event);
+ ret = hyperv_set_event_flag_handler(VMBUS_EVENT_CONNECTION_ID,
+ &vmbus->notifier);
+ if (ret != 0) {
+ error_setg(&local_err, "hyperv set event handler failed with %d", ret);
+ goto clear_event_notifier;
+ }
+
+ return;
+
+clear_event_notifier:
+ event_notifier_cleanup(&vmbus->notifier);
+remove_msg_handler:
+ hyperv_set_msg_handler(VMBUS_MESSAGE_CONNECTION_ID, NULL, NULL);
+error_out:
+ qemu_mutex_destroy(&vmbus->rx_queue_lock);
+ error_propagate(errp, local_err);
+}
+
+static void vmbus_unrealize(BusState *bus)
+{
+ VMBus *vmbus = VMBUS(bus);
+
+ hyperv_set_msg_handler(VMBUS_MESSAGE_CONNECTION_ID, NULL, NULL);
+ hyperv_set_event_flag_handler(VMBUS_EVENT_CONNECTION_ID, NULL);
+ event_notifier_cleanup(&vmbus->notifier);
+
+ qemu_mutex_destroy(&vmbus->rx_queue_lock);
+}
+
+static void vmbus_reset(BusState *bus)
+{
+ vmbus_deinit(VMBUS(bus));
+}
+
+static char *vmbus_get_dev_path(DeviceState *dev)
+{
+ BusState *bus = qdev_get_parent_bus(dev);
+ return qdev_get_dev_path(bus->parent);
+}
+
+static char *vmbus_get_fw_dev_path(DeviceState *dev)
+{
+ VMBusDevice *vdev = VMBUS_DEVICE(dev);
+ char uuid[UUID_FMT_LEN + 1];
+
+ qemu_uuid_unparse(&vdev->instanceid, uuid);
+ return g_strdup_printf("%s@%s", qdev_fw_name(dev), uuid);
+}
+
+static void vmbus_class_init(ObjectClass *klass, void *data)
+{
+ BusClass *k = BUS_CLASS(klass);
+
+ k->get_dev_path = vmbus_get_dev_path;
+ k->get_fw_dev_path = vmbus_get_fw_dev_path;
+ k->realize = vmbus_realize;
+ k->unrealize = vmbus_unrealize;
+ k->reset = vmbus_reset;
+}
+
+static int vmbus_pre_load(void *opaque)
+{
+ VMBusChannel *chan;
+ VMBus *vmbus = VMBUS(opaque);
+
+ /*
+ * channel IDs allocated by the source will come in the migration stream
+ * for each channel, so clean up the ones allocated at realize
+ */
+ QTAILQ_FOREACH(chan, &vmbus->channel_list, link) {
+ unregister_chan_id(chan);
+ }
+
+ return 0;
+}
+
+static int vmbus_post_load(void *opaque, int version_id)
+{
+ int ret;
+ VMBus *vmbus = VMBUS(opaque);
+ VMBusGpadl *gpadl;
+ VMBusChannel *chan;
+
+ ret = vmbus_init(vmbus);
+ if (ret) {
+ return ret;
+ }
+
+ QTAILQ_FOREACH(gpadl, &vmbus->gpadl_list, link) {
+ gpadl->vmbus = vmbus;
+ gpadl->refcount = 1;
+ }
+
+ /*
+ * reopening channels depends on initialized vmbus so it's done here
+ * instead of channel_post_load()
+ */
+ QTAILQ_FOREACH(chan, &vmbus->channel_list, link) {
+ if (chan->state == VMCHAN_OPENING || chan->state == VMCHAN_OPEN) {
+ open_channel(chan);
+ }
+
+ if (chan->state != VMCHAN_OPEN) {
+ continue;
+ }
+
+ if (!vmbus_channel_is_open(chan)) {
+ /* reopen failed, abort loading */
+ return -1;
+ }
+
+ /* resume processing on the guest side if it missed the notification */
+ hyperv_sint_route_set_sint(chan->notify_route);
+ /* ditto on the host side */
+ vmbus_channel_notify_host(chan);
+ }
+
+ vmbus_resched(vmbus);
+ return 0;
+}
+
+static const VMStateDescription vmstate_post_message_input = {
+ .name = "vmbus/hyperv_post_message_input",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ /*
+ * skip connection_id and message_type as they are validated before
+ * queueing and ignored on dequeueing
+ */
+ VMSTATE_UINT32(payload_size, struct hyperv_post_message_input),
+ VMSTATE_UINT8_ARRAY(payload, struct hyperv_post_message_input,
+ HV_MESSAGE_PAYLOAD_SIZE),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool vmbus_rx_queue_needed(void *opaque)
+{
+ VMBus *vmbus = VMBUS(opaque);
+ return vmbus->rx_queue_size;
+}
+
+static const VMStateDescription vmstate_rx_queue = {
+ .name = "vmbus/rx_queue",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .needed = vmbus_rx_queue_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(rx_queue_head, VMBus),
+ VMSTATE_UINT8(rx_queue_size, VMBus),
+ VMSTATE_STRUCT_ARRAY(rx_queue, VMBus,
+ HV_MSG_QUEUE_LEN, 0,
+ vmstate_post_message_input,
+ struct hyperv_post_message_input),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_vmbus = {
+ .name = TYPE_VMBUS,
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .pre_load = vmbus_pre_load,
+ .post_load = vmbus_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(state, VMBus),
+ VMSTATE_UINT32(version, VMBus),
+ VMSTATE_UINT32(target_vp, VMBus),
+ VMSTATE_UINT64(int_page_gpa, VMBus),
+ VMSTATE_QTAILQ_V(gpadl_list, VMBus, 0,
+ vmstate_gpadl, VMBusGpadl, link),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * []) {
+ &vmstate_rx_queue,
+ NULL
+ }
+};
+
+static const TypeInfo vmbus_type_info = {
+ .name = TYPE_VMBUS,
+ .parent = TYPE_BUS,
+ .instance_size = sizeof(VMBus),
+ .class_init = vmbus_class_init,
+};
+
+static void vmbus_bridge_realize(DeviceState *dev, Error **errp)
+{
+ VMBusBridge *bridge = VMBUS_BRIDGE(dev);
+
+ /*
+ * here there's at least one vmbus bridge that is being realized, so
+ * vmbus_bridge_find can only return NULL if it's not unique
+ */
+ if (!vmbus_bridge_find()) {
+ error_setg(errp, "there can be at most one %s in the system",
+ TYPE_VMBUS_BRIDGE);
+ return;
+ }
+
+ if (!hyperv_is_synic_enabled()) {
+ error_report("VMBus requires usable Hyper-V SynIC and VP_INDEX");
+ return;
+ }
+
+ bridge->bus = VMBUS(qbus_create(TYPE_VMBUS, dev, "vmbus"));
+}
+
+static char *vmbus_bridge_ofw_unit_address(const SysBusDevice *dev)
+{
+ /* there can be only one VMBus */
+ return g_strdup("0");
+}
+
+static const VMStateDescription vmstate_vmbus_bridge = {
+ .name = TYPE_VMBUS_BRIDGE,
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_POINTER(bus, VMBusBridge, vmstate_vmbus, VMBus),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static Property vmbus_bridge_props[] = {
+ DEFINE_PROP_UINT8("irq0", VMBusBridge, irq0, 7),
+ DEFINE_PROP_UINT8("irq1", VMBusBridge, irq1, 13),
+ DEFINE_PROP_END_OF_LIST()
+};
+
+static void vmbus_bridge_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *k = DEVICE_CLASS(klass);
+ SysBusDeviceClass *sk = SYS_BUS_DEVICE_CLASS(klass);
+
+ k->realize = vmbus_bridge_realize;
+ k->fw_name = "vmbus";
+ sk->explicit_ofw_unit_address = vmbus_bridge_ofw_unit_address;
+ set_bit(DEVICE_CATEGORY_BRIDGE, k->categories);
+ k->vmsd = &vmstate_vmbus_bridge;
+ device_class_set_props(k, vmbus_bridge_props);
+ /* override SysBusDevice's default */
+ k->user_creatable = true;
+}
+
+static const TypeInfo vmbus_bridge_type_info = {
+ .name = TYPE_VMBUS_BRIDGE,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(VMBusBridge),
+ .class_init = vmbus_bridge_class_init,
+};
+
+static void vmbus_register_types(void)
+{
+ type_register_static(&vmbus_bridge_type_info);
+ type_register_static(&vmbus_dev_type_info);
+ type_register_static(&vmbus_type_info);
+}
+
+type_init(vmbus_register_types)
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 473cbdfffd..900f786d08 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -51,6 +51,7 @@
#include "hw/mem/nvdimm.h"
#include "sysemu/numa.h"
#include "sysemu/reset.h"
+#include "hw/hyperv/vmbus-bridge.h"
/* Supported chipsets: */
#include "hw/southbridge/piix.h"
@@ -1052,9 +1053,47 @@ static Aml *build_mouse_device_aml(void)
return dev;
}
+static Aml *build_vmbus_device_aml(VMBusBridge *vmbus_bridge)
+{
+ Aml *dev;
+ Aml *method;
+ Aml *crs;
+
+ dev = aml_device("VMBS");
+ aml_append(dev, aml_name_decl("STA", aml_int(0xF)));
+ aml_append(dev, aml_name_decl("_HID", aml_string("VMBus")));
+ aml_append(dev, aml_name_decl("_UID", aml_int(0x0)));
+ aml_append(dev, aml_name_decl("_DDN", aml_string("VMBUS")));
+
+ method = aml_method("_DIS", 0, AML_NOTSERIALIZED);
+ aml_append(method, aml_store(aml_and(aml_name("STA"), aml_int(0xD), NULL),
+ aml_name("STA")));
+ aml_append(dev, method);
+
+ method = aml_method("_PS0", 0, AML_NOTSERIALIZED);
+ aml_append(method, aml_store(aml_or(aml_name("STA"), aml_int(0xF), NULL),
+ aml_name("STA")));
+ aml_append(dev, method);
+
+ method = aml_method("_STA", 0, AML_NOTSERIALIZED);
+ aml_append(method, aml_return(aml_name("STA")));
+ aml_append(dev, method);
+
+ aml_append(dev, aml_name_decl("_PS3", aml_int(0x0)));
+
+ crs = aml_resource_template();
+ aml_append(crs, aml_irq_no_flags(vmbus_bridge->irq0));
+    /* FIXME: newer Hyper-V versions get by with only one IRQ */
+ aml_append(crs, aml_irq_no_flags(vmbus_bridge->irq1));
+ aml_append(dev, aml_name_decl("_CRS", crs));
+
+ return dev;
+}
+
static void build_isa_devices_aml(Aml *table)
{
ISADevice *fdc = pc_find_fdc0();
+ VMBusBridge *vmbus_bridge = vmbus_bridge_find();
bool ambiguous;
Aml *scope = aml_scope("_SB.PCI0.ISA");
@@ -1075,6 +1114,10 @@ static void build_isa_devices_aml(Aml *table)
isa_build_aml(ISA_BUS(obj), scope);
}
+ if (vmbus_bridge) {
+ aml_append(scope, build_vmbus_device_aml(vmbus_bridge));
+ }
+
aml_append(table, scope);
}
diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
index fd75cae024..4346060e62 100644
--- a/hw/i386/amd_iommu.c
+++ b/hw/i386/amd_iommu.c
@@ -370,7 +370,7 @@ static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd)
hwaddr addr = cpu_to_le64(extract64(cmd[0], 3, 49)) << 3;
uint64_t data = cpu_to_le64(cmd[1]);
- if (extract64(cmd[0], 51, 8)) {
+ if (extract64(cmd[0], 52, 8)) {
amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
s->cmdbuf + s->cmdbuf_head);
}
@@ -395,7 +395,7 @@ static void amdvi_inval_devtab_entry(AMDVIState *s, uint64_t *cmd)
uint16_t devid = cpu_to_le16((uint16_t)extract64(cmd[0], 0, 16));
/* This command should invalidate internal caches of which there isn't */
- if (extract64(cmd[0], 15, 16) || cmd[1]) {
+ if (extract64(cmd[0], 16, 44) || cmd[1]) {
amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
s->cmdbuf + s->cmdbuf_head);
}
@@ -405,9 +405,9 @@ static void amdvi_inval_devtab_entry(AMDVIState *s, uint64_t *cmd)
static void amdvi_complete_ppr(AMDVIState *s, uint64_t *cmd)
{
- if (extract64(cmd[0], 15, 16) || extract64(cmd[0], 19, 8) ||
+ if (extract64(cmd[0], 16, 16) || extract64(cmd[0], 52, 8) ||
extract64(cmd[1], 0, 2) || extract64(cmd[1], 3, 29)
- || extract64(cmd[1], 47, 16)) {
+ || extract64(cmd[1], 48, 16)) {
amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
s->cmdbuf + s->cmdbuf_head);
}
@@ -438,8 +438,8 @@ static void amdvi_inval_pages(AMDVIState *s, uint64_t *cmd)
{
uint16_t domid = cpu_to_le16((uint16_t)extract64(cmd[0], 32, 16));
- if (extract64(cmd[0], 20, 12) || extract64(cmd[0], 16, 12) ||
- extract64(cmd[0], 3, 10)) {
+ if (extract64(cmd[0], 20, 12) || extract64(cmd[0], 48, 12) ||
+ extract64(cmd[1], 3, 9)) {
amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
s->cmdbuf + s->cmdbuf_head);
}
@@ -451,7 +451,7 @@ static void amdvi_inval_pages(AMDVIState *s, uint64_t *cmd)
static void amdvi_prefetch_pages(AMDVIState *s, uint64_t *cmd)
{
- if (extract64(cmd[0], 16, 8) || extract64(cmd[0], 20, 8) ||
+ if (extract64(cmd[0], 16, 8) || extract64(cmd[0], 52, 8) ||
extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) ||
extract64(cmd[1], 5, 7)) {
amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
@@ -463,7 +463,7 @@ static void amdvi_prefetch_pages(AMDVIState *s, uint64_t *cmd)
static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd)
{
- if (extract64(cmd[0], 16, 16) || cmd[1]) {
+ if (extract64(cmd[0], 16, 44) || cmd[1]) {
amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
s->cmdbuf + s->cmdbuf_head);
return;
@@ -479,7 +479,8 @@ static void iommu_inval_iotlb(AMDVIState *s, uint64_t *cmd)
{
uint16_t devid = extract64(cmd[0], 0, 16);
- if (extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 9)) {
+ if (extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) ||
+ extract64(cmd[1], 6, 6)) {
amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
s->cmdbuf + s->cmdbuf_head);
return;
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 2128f3d6fe..143ac1c354 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -31,6 +31,7 @@
#include "hw/i386/apic.h"
#include "hw/i386/topology.h"
#include "hw/i386/fw_cfg.h"
+#include "hw/i386/vmport.h"
#include "sysemu/cpus.h"
#include "hw/block/fdc.h"
#include "hw/ide.h"
@@ -56,6 +57,7 @@
#include "sysemu/tcg.h"
#include "sysemu/numa.h"
#include "sysemu/kvm.h"
+#include "sysemu/xen.h"
#include "sysemu/qtest.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
@@ -91,7 +93,6 @@
#include "qapi/qmp/qerror.h"
#include "config-devices.h"
#include "e820_memory_layout.h"
-#include "vmport.h"
#include "fw_cfg.h"
#include "trace.h"
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index f66e1d73ce..054d3aa9f7 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -53,6 +53,7 @@
#include "cpu.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
+#include "sysemu/xen.h"
#ifdef CONFIG_XEN
#include <xen/hvm/hvm_info_table.h>
#include "hw/xen/xen_pt.h"
@@ -60,6 +61,7 @@
#include "migration/global_state.h"
#include "migration/misc.h"
#include "sysemu/numa.h"
+#include "hw/hyperv/vmbus-bridge.h"
#include "hw/mem/nvdimm.h"
#include "hw/i386/acpi-build.h"
@@ -375,7 +377,7 @@ static void pc_init_isa(MachineState *machine)
#ifdef CONFIG_XEN
static void pc_xen_hvm_init_pci(MachineState *machine)
{
- const char *pci_type = has_igd_gfx_passthru ?
+ const char *pci_type = xen_igd_gfx_pt_enabled() ?
TYPE_IGD_PASSTHROUGH_I440FX_PCI_DEVICE : TYPE_I440FX_PCI_DEVICE;
pc_init1(machine,
@@ -419,6 +421,7 @@ static void pc_i440fx_machine_options(MachineClass *m)
m->default_machine_opts = "firmware=bios-256k.bin";
m->default_display = "std";
machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE);
+ machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
}
static void pc_i440fx_5_1_machine_options(MachineClass *m)
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index 4ba8ac8774..fa9ef449d1 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -36,6 +36,7 @@
#include "hw/rtc/mc146818rtc.h"
#include "hw/xen/xen.h"
#include "sysemu/kvm.h"
+#include "sysemu/xen.h"
#include "hw/kvm/clock.h"
#include "hw/pci-host/q35.h"
#include "hw/qdev-properties.h"
@@ -53,6 +54,7 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "sysemu/numa.h"
+#include "hw/hyperv/vmbus-bridge.h"
#include "hw/mem/nvdimm.h"
#include "hw/i386/acpi-build.h"
@@ -348,6 +350,7 @@ static void pc_q35_machine_options(MachineClass *m)
machine_class_allow_dynamic_sysbus_dev(m, TYPE_AMD_IOMMU_DEVICE);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_INTEL_IOMMU_DEVICE);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE);
+ machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
m->max_cpus = 288;
}
diff --git a/hw/i386/vmmouse.c b/hw/i386/vmmouse.c
index b3aef41327..ba5c987bd2 100644
--- a/hw/i386/vmmouse.c
+++ b/hw/i386/vmmouse.c
@@ -25,21 +25,15 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "ui/console.h"
+#include "hw/i386/vmport.h"
#include "hw/input/i8042.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
-#include "vmport.h"
#include "cpu.h"
/* debug only vmmouse */
//#define DEBUG_VMMOUSE
-/* VMMouse Commands */
-#define VMMOUSE_GETVERSION 10
-#define VMMOUSE_DATA 39
-#define VMMOUSE_STATUS 40
-#define VMMOUSE_COMMAND 41
-
#define VMMOUSE_READ_ID 0x45414552
#define VMMOUSE_DISABLE 0x000000f5
#define VMMOUSE_REQUEST_RELATIVE 0x4c455252
@@ -217,10 +211,10 @@ static uint32_t vmmouse_ioport_read(void *opaque, uint32_t addr)
command = data[2] & 0xFFFF;
switch (command) {
- case VMMOUSE_STATUS:
+ case VMPORT_CMD_VMMOUSE_STATUS:
data[0] = vmmouse_get_status(s);
break;
- case VMMOUSE_COMMAND:
+ case VMPORT_CMD_VMMOUSE_COMMAND:
switch (data[1]) {
case VMMOUSE_DISABLE:
vmmouse_disable(s);
@@ -239,7 +233,7 @@ static uint32_t vmmouse_ioport_read(void *opaque, uint32_t addr)
break;
}
break;
- case VMMOUSE_DATA:
+ case VMPORT_CMD_VMMOUSE_DATA:
vmmouse_data(s, data, data[1]);
break;
default:
@@ -296,9 +290,9 @@ static void vmmouse_realizefn(DeviceState *dev, Error **errp)
return;
}
- vmport_register(VMMOUSE_STATUS, vmmouse_ioport_read, s);
- vmport_register(VMMOUSE_COMMAND, vmmouse_ioport_read, s);
- vmport_register(VMMOUSE_DATA, vmmouse_ioport_read, s);
+ vmport_register(VMPORT_CMD_VMMOUSE_STATUS, vmmouse_ioport_read, s);
+ vmport_register(VMPORT_CMD_VMMOUSE_COMMAND, vmmouse_ioport_read, s);
+ vmport_register(VMPORT_CMD_VMMOUSE_DATA, vmmouse_ioport_read, s);
}
static Property vmmouse_properties[] = {
diff --git a/hw/i386/vmport.c b/hw/i386/vmport.c
index 1aaaab691a..89bda9108e 100644
--- a/hw/i386/vmport.c
+++ b/hw/i386/vmport.c
@@ -21,20 +21,47 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
+
+/*
+ * Guest code that interacts with this virtual device can be found
+ * in VMware open-vm-tools open-source project:
+ * https://github.com/vmware/open-vm-tools
+ */
+
#include "qemu/osdep.h"
#include "hw/isa/isa.h"
+#include "hw/i386/vmport.h"
+#include "hw/qdev-properties.h"
+#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
+#include "sysemu/qtest.h"
#include "qemu/log.h"
-#include "vmport.h"
#include "cpu.h"
#include "trace.h"
-#define VMPORT_CMD_GETVERSION 0x0a
-#define VMPORT_CMD_GETRAMSIZE 0x14
-
-#define VMPORT_ENTRIES 0x2c
#define VMPORT_MAGIC 0x564D5868
+/* Compatibility flags for migration */
+#define VMPORT_COMPAT_READ_SET_EAX_BIT 0
+#define VMPORT_COMPAT_SIGNAL_UNSUPPORTED_CMD_BIT 1
+#define VMPORT_COMPAT_REPORT_VMX_TYPE_BIT 2
+#define VMPORT_COMPAT_CMDS_V2_BIT 3
+#define VMPORT_COMPAT_READ_SET_EAX \
+ (1 << VMPORT_COMPAT_READ_SET_EAX_BIT)
+#define VMPORT_COMPAT_SIGNAL_UNSUPPORTED_CMD \
+ (1 << VMPORT_COMPAT_SIGNAL_UNSUPPORTED_CMD_BIT)
+#define VMPORT_COMPAT_REPORT_VMX_TYPE \
+ (1 << VMPORT_COMPAT_REPORT_VMX_TYPE_BIT)
+#define VMPORT_COMPAT_CMDS_V2 \
+ (1 << VMPORT_COMPAT_CMDS_V2_BIT)
+
+/* vCPU features reported by CMD_GET_VCPU_INFO */
+#define VCPU_INFO_SLC64_BIT 0
+#define VCPU_INFO_SYNC_VTSCS_BIT 1
+#define VCPU_INFO_HV_REPLAY_OK_BIT 2
+#define VCPU_INFO_LEGACY_X2APIC_BIT 3
+#define VCPU_INFO_RESERVED_BIT 31
+
#define VMPORT(obj) OBJECT_CHECK(VMPortState, (obj), TYPE_VMPORT)
typedef struct VMPortState {
@@ -43,15 +70,19 @@ typedef struct VMPortState {
MemoryRegion io;
VMPortReadFunc *func[VMPORT_ENTRIES];
void *opaque[VMPORT_ENTRIES];
+
+ uint32_t vmware_vmx_version;
+ uint8_t vmware_vmx_type;
+
+ uint32_t compat_flags;
} VMPortState;
static VMPortState *port_state;
-void vmport_register(unsigned char command, VMPortReadFunc *func, void *opaque)
+void vmport_register(VMPortCommand command, VMPortReadFunc *func, void *opaque)
{
- if (command >= VMPORT_ENTRIES) {
- return;
- }
+ assert(command < VMPORT_ENTRIES);
+ assert(port_state);
trace_vmport_register(command, func, opaque);
port_state->func[command] = func;
@@ -64,25 +95,51 @@ static uint64_t vmport_ioport_read(void *opaque, hwaddr addr,
VMPortState *s = opaque;
CPUState *cs = current_cpu;
X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
+ CPUX86State *env;
unsigned char command;
uint32_t eax;
+ if (qtest_enabled()) {
+ return -1;
+ }
+ env = &cpu->env;
cpu_synchronize_state(cs);
eax = env->regs[R_EAX];
if (eax != VMPORT_MAGIC) {
- return eax;
+ goto err;
}
command = env->regs[R_ECX];
trace_vmport_command(command);
if (command >= VMPORT_ENTRIES || !s->func[command]) {
qemu_log_mask(LOG_UNIMP, "vmport: unknown command %x\n", command);
- return eax;
+ goto err;
}
- return s->func[command](s->opaque[command], addr);
+ eax = s->func[command](s->opaque[command], addr);
+ goto out;
+
+err:
+ if (s->compat_flags & VMPORT_COMPAT_SIGNAL_UNSUPPORTED_CMD) {
+ eax = UINT32_MAX;
+ }
+
+out:
+    /*
+     * The call above to cpu_synchronize_state() copies the vCPU register
+     * values into QEMU, but it also causes QEMU to write its copy of the
+     * vCPU registers back to the vCPU implementation (e.g. an accelerator
+     * such as KVM) just before resuming the guest.
+     *
+     * Therefore, to make the I/O port return value propagate to the
+     * guest's EAX, we need to explicitly update QEMU's EAX register value.
+     */
+ if (s->compat_flags & VMPORT_COMPAT_READ_SET_EAX) {
+ cpu->env.regs[R_EAX] = eax;
+ }
+
+ return eax;
}
static void vmport_ioport_write(void *opaque, hwaddr addr,
@@ -90,6 +147,9 @@ static void vmport_ioport_write(void *opaque, hwaddr addr,
{
X86CPU *cpu = X86_CPU(current_cpu);
+ if (qtest_enabled()) {
+ return;
+ }
cpu->env.regs[R_EAX] = vmport_ioport_read(opaque, addr, 4);
}
@@ -97,18 +157,69 @@ static uint32_t vmport_cmd_get_version(void *opaque, uint32_t addr)
{
X86CPU *cpu = X86_CPU(current_cpu);
+ if (qtest_enabled()) {
+ return -1;
+ }
cpu->env.regs[R_EBX] = VMPORT_MAGIC;
- return 6;
+ if (port_state->compat_flags & VMPORT_COMPAT_REPORT_VMX_TYPE) {
+ cpu->env.regs[R_ECX] = port_state->vmware_vmx_type;
+ }
+ return port_state->vmware_vmx_version;
+}
+
+static uint32_t vmport_cmd_get_bios_uuid(void *opaque, uint32_t addr)
+{
+ X86CPU *cpu = X86_CPU(current_cpu);
+ uint32_t *uuid_parts = (uint32_t *)(qemu_uuid.data);
+
+ cpu->env.regs[R_EAX] = le32_to_cpu(uuid_parts[0]);
+ cpu->env.regs[R_EBX] = le32_to_cpu(uuid_parts[1]);
+ cpu->env.regs[R_ECX] = le32_to_cpu(uuid_parts[2]);
+ cpu->env.regs[R_EDX] = le32_to_cpu(uuid_parts[3]);
+ return cpu->env.regs[R_EAX];
}
static uint32_t vmport_cmd_ram_size(void *opaque, uint32_t addr)
{
X86CPU *cpu = X86_CPU(current_cpu);
+ if (qtest_enabled()) {
+ return -1;
+ }
cpu->env.regs[R_EBX] = 0x1177;
return ram_size;
}
+static uint32_t vmport_cmd_get_hz(void *opaque, uint32_t addr)
+{
+ X86CPU *cpu = X86_CPU(current_cpu);
+
+ if (cpu->env.tsc_khz && cpu->env.apic_bus_freq) {
+ uint64_t tsc_freq = (uint64_t)cpu->env.tsc_khz * 1000;
+
+ cpu->env.regs[R_ECX] = cpu->env.apic_bus_freq;
+ cpu->env.regs[R_EBX] = (uint32_t)(tsc_freq >> 32);
+ cpu->env.regs[R_EAX] = (uint32_t)tsc_freq;
+ } else {
+ /* Signal cmd as not supported */
+ cpu->env.regs[R_EBX] = UINT32_MAX;
+ }
+
+ return cpu->env.regs[R_EAX];
+}
+
+static uint32_t vmport_cmd_get_vcpu_info(void *opaque, uint32_t addr)
+{
+ X86CPU *cpu = X86_CPU(current_cpu);
+ uint32_t ret = 0;
+
+ if (cpu->env.features[FEAT_1_ECX] & CPUID_EXT_X2APIC) {
+ ret |= 1 << VCPU_INFO_LEGACY_X2APIC_BIT;
+ }
+
+ return ret;
+}
+
static const MemoryRegionOps vmport_ops = {
.read = vmport_ioport_read,
.write = vmport_ioport_write,
@@ -128,11 +239,54 @@ static void vmport_realizefn(DeviceState *dev, Error **errp)
isa_register_ioport(isadev, &s->io, 0x5658);
port_state = s;
+
/* Register some generic port commands */
vmport_register(VMPORT_CMD_GETVERSION, vmport_cmd_get_version, NULL);
vmport_register(VMPORT_CMD_GETRAMSIZE, vmport_cmd_ram_size, NULL);
+ if (s->compat_flags & VMPORT_COMPAT_CMDS_V2) {
+ vmport_register(VMPORT_CMD_GETBIOSUUID, vmport_cmd_get_bios_uuid, NULL);
+ vmport_register(VMPORT_CMD_GETHZ, vmport_cmd_get_hz, NULL);
+ vmport_register(VMPORT_CMD_GET_VCPU_INFO, vmport_cmd_get_vcpu_info,
+ NULL);
+ }
}
+static Property vmport_properties[] = {
+ /* Used to enforce compatibility for migration */
+ DEFINE_PROP_BIT("x-read-set-eax", VMPortState, compat_flags,
+ VMPORT_COMPAT_READ_SET_EAX_BIT, true),
+ DEFINE_PROP_BIT("x-signal-unsupported-cmd", VMPortState, compat_flags,
+ VMPORT_COMPAT_SIGNAL_UNSUPPORTED_CMD_BIT, true),
+ DEFINE_PROP_BIT("x-report-vmx-type", VMPortState, compat_flags,
+ VMPORT_COMPAT_REPORT_VMX_TYPE_BIT, true),
+ DEFINE_PROP_BIT("x-cmds-v2", VMPortState, compat_flags,
+ VMPORT_COMPAT_CMDS_V2_BIT, true),
+
+ /* Default value taken from the open-vm-tools VERSION_MAGIC definition */
+ DEFINE_PROP_UINT32("vmware-vmx-version", VMPortState,
+ vmware_vmx_version, 6),
+ /*
+ * The value determines which VMware product type the host reports
+ * itself as to the guest.
+ *
+ * Most guests are fine with the host exposing itself as a VMware ESX
+ * server. Some legacy/proprietary guests hard-code a given type.
+ *
+ * For a complete list of values, refer to enum VMXType in the
+ * open-vm-tools project (defined in lib/include/vm_vmx_type.h).
+ *
+ * Reasonable options:
+ * 0 - Unset
+ * 1 - VMware Express (deprecated)
+ * 2 - VMware ESX Server
+ * 3 - VMware Server (deprecated)
+ * 4 - VMware Workstation
+ * 5 - ACE 1.x (deprecated)
+ */
+ DEFINE_PROP_UINT8("vmware-vmx-type", VMPortState, vmware_vmx_type, 2),
+
+ DEFINE_PROP_END_OF_LIST(),
+};
+
static void vmport_class_initfn(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -140,6 +294,7 @@ static void vmport_class_initfn(ObjectClass *klass, void *data)
dc->realize = vmport_realizefn;
/* Reason: realize sets global port_state */
dc->user_creatable = false;
+ device_class_set_props(dc, vmport_properties);
}
static const TypeInfo vmport_info = {
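The vmport device above implements VMware's "backdoor" I/O port protocol: the guest issues an IN on port 0x5658 with EAX set to VMPORT_MAGIC and a command number in ECX, and the registered handler for that command fills the registers with its reply. Below is a minimal standalone sketch of the same registration/dispatch pattern; the table size and the dispatch entry point are assumptions, the real device lives in hw/i386/vmport.c.

/* Hypothetical standalone sketch of the vmport dispatch pattern. */
#include <stdint.h>
#include <stddef.h>

#define VMPORT_MAGIC   0x564D5868 /* "VMXh" in guest EAX */
#define VMPORT_ENTRIES 69         /* one past VMPORT_CMD_GET_VCPU_INFO (68) */

typedef uint32_t (VMPortReadFunc)(void *opaque, uint32_t address);

static struct {
    VMPortReadFunc *func;
    void *opaque;
} port_table[VMPORT_ENTRIES];

static void vmport_register(unsigned char command, VMPortReadFunc *func,
                            void *opaque)
{
    if (command < VMPORT_ENTRIES) {
        port_table[command].func = func;
        port_table[command].opaque = opaque;
    }
}

/* Guest did IN from port 0x5658 with EAX == VMPORT_MAGIC; the command
 * number travels in the low byte of ECX. Unknown commands answer ~0. */
static uint32_t vmport_dispatch(uint32_t eax, uint32_t ecx)
{
    unsigned char command = ecx & 0xff;

    if (eax != VMPORT_MAGIC ||
        command >= VMPORT_ENTRIES || !port_table[command].func) {
        return UINT32_MAX;
    }
    return port_table[command].func(port_table[command].opaque, 0x5658);
}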
diff --git a/hw/i386/vmport.h b/hw/i386/vmport.h
deleted file mode 100644
index 47eda7a22b..0000000000
--- a/hw/i386/vmport.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * QEMU VMPort emulation
- *
- * Copyright (C) 2007 Hervé Poussineau
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#ifndef HW_I386_VMPORT_H
-#define HW_I386_VMPORT_H
-
-#define TYPE_VMPORT "vmport"
-
-typedef uint32_t (VMPortReadFunc)(void *opaque, uint32_t address);
-
-void vmport_register(unsigned char command, VMPortReadFunc *func, void *opaque);
-
-#endif
diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
index 94fe5d65e9..628bde5fac 100644
--- a/hw/i386/xen/xen-hvm.c
+++ b/hw/i386/xen/xen-hvm.c
@@ -29,6 +29,7 @@
#include "qemu/range.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
+#include "sysemu/xen.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#include "exec/address-spaces.h"
diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c
index 0f7b05e5e1..a1492fdecd 100644
--- a/hw/i386/xen/xen_platform.c
+++ b/hw/i386/xen/xen_platform.c
@@ -33,6 +33,7 @@
#include "hw/xen/xen-legacy-backend.h"
#include "trace.h"
#include "exec/address-spaces.h"
+#include "sysemu/xen.h"
#include "sysemu/block-backend.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c
index ffe30dc457..bca71b5934 100644
--- a/hw/intc/ioapic.c
+++ b/hw/intc/ioapic.c
@@ -241,6 +241,25 @@ void ioapic_eoi_broadcast(int vector)
continue;
}
+#ifdef CONFIG_KVM
+ /*
+ * When the IOAPIC is in userspace while the APIC is still in
+ * the kernel (i.e., split irqchip), we have a trick to kick
+ * the resamplefd logic for registered irqfds from userspace
+ * in order to deactivate the IRQ. When that happens, the irq
+ * has bypassed the userspace IOAPIC (so the irr and remote-irr
+ * of the table entry should be bypassed too, even if an
+ * interrupt arrives). Still kick the resamplefds if they're
+ * bound to the IRQ, to make sure the interrupt is EOIed
+ * correctly for the hardware.
+ *
+ * Note: we still need to go through the irr & remote-irr
+ * operations below, because we don't know whether there are
+ * emulated devices using/sharing the same IRQ.
+ */
+ kvm_resample_fd_notify(n);
+#endif
+
if (!(entry & IOAPIC_LVT_REMOTE_IRR)) {
continue;
}
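The resamplefd mechanism referenced here pairs an irqfd (which injects a level-triggered interrupt) with a second eventfd that KVM signals on guest EOI, so userspace can re-check the device's line and possibly re-assert it. A hedged, standalone illustration of that signal/re-check handshake using a plain Linux eventfd (names are hypothetical; in the real setup KVM, not the program itself, writes to the fd):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
    int resamplefd = eventfd(0, EFD_CLOEXEC);
    uint64_t val = 1;

    if (resamplefd < 0) {
        return 1;
    }

    /* EOI side: signal that the level interrupt was acknowledged... */
    write(resamplefd, &val, sizeof(val));

    /* ...device side: wake up and re-evaluate the IRQ line level. */
    read(resamplefd, &val, sizeof(val));
    printf("resample: re-assert the IRQ if the device still has it pending\n");

    close(resamplefd);
    return 0;
}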
diff --git a/hw/isa/piix3.c b/hw/isa/piix3.c
index fd1c78879f..1a5267e19f 100644
--- a/hw/isa/piix3.c
+++ b/hw/isa/piix3.c
@@ -28,6 +28,7 @@
#include "hw/irq.h"
#include "hw/isa/isa.h"
#include "hw/xen/xen.h"
+#include "sysemu/xen.h"
#include "sysemu/sysemu.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
diff --git a/hw/pci-host/pam.c b/hw/pci-host/pam.c
index 45c4333cd3..a496205783 100644
--- a/hw/pci-host/pam.c
+++ b/hw/pci-host/pam.c
@@ -28,7 +28,6 @@
*/
#include "qemu/osdep.h"
-#include "qom/object.h"
#include "hw/pci-host/pam.h"
void init_pam(DeviceState *dev, MemoryRegion *ram_memory,
diff --git a/hw/pci/msix.c b/hw/pci/msix.c
index e6a5559038..67e34f34d6 100644
--- a/hw/pci/msix.c
+++ b/hw/pci/msix.c
@@ -19,6 +19,7 @@
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/xen/xen.h"
+#include "sysemu/xen.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "qemu/range.h"
diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
index af18c88b65..634af0bbb8 100644
--- a/hw/scsi/megasas.c
+++ b/hw/scsi/megasas.c
@@ -86,34 +86,34 @@ typedef struct MegasasState {
MemoryRegion queue_io;
uint32_t frame_hi;
- int fw_state;
+ uint32_t fw_state;
uint32_t fw_sge;
uint32_t fw_cmds;
uint32_t flags;
- int fw_luns;
- int intr_mask;
- int doorbell;
- int busy;
- int diag;
- int adp_reset;
+ uint32_t fw_luns;
+ uint32_t intr_mask;
+ uint32_t doorbell;
+ uint32_t busy;
+ uint32_t diag;
+ uint32_t adp_reset;
OnOffAuto msi;
OnOffAuto msix;
MegasasCmd *event_cmd;
- int event_locale;
+ uint16_t event_locale;
int event_class;
- int event_count;
- int shutdown_event;
- int boot_event;
+ uint32_t event_count;
+ uint32_t shutdown_event;
+ uint32_t boot_event;
uint64_t sas_addr;
char *hba_serial;
uint64_t reply_queue_pa;
void *reply_queue;
- int reply_queue_len;
- int reply_queue_head;
- int reply_queue_tail;
+ uint16_t reply_queue_len;
+ uint16_t reply_queue_head;
+ uint16_t reply_queue_tail;
uint64_t consumer_pa;
uint64_t producer_pa;
@@ -445,7 +445,7 @@ static MegasasCmd *megasas_lookup_frame(MegasasState *s,
index = s->reply_queue_head;
- while (num < s->fw_cmds) {
+ while (num < s->fw_cmds && index < MEGASAS_MAX_FRAMES) {
if (s->frames[index].pa && s->frames[index].pa == frame) {
cmd = &s->frames[index];
break;
@@ -504,7 +504,7 @@ static MegasasCmd *megasas_enqueue_frame(MegasasState *s,
cmd->pa = frame;
/* Map all possible frames */
cmd->frame = pci_dma_map(pcid, frame, &frame_size_p, 0);
- if (frame_size_p != frame_size) {
+ if (!cmd->frame || frame_size_p != frame_size) {
trace_megasas_qf_map_failed(cmd->index, (unsigned long)frame);
if (cmd->frame) {
megasas_unmap_frame(s, cmd);
@@ -2259,9 +2259,9 @@ static const VMStateDescription vmstate_megasas_gen1 = {
VMSTATE_PCI_DEVICE(parent_obj, MegasasState),
VMSTATE_MSIX(parent_obj, MegasasState),
- VMSTATE_INT32(fw_state, MegasasState),
- VMSTATE_INT32(intr_mask, MegasasState),
- VMSTATE_INT32(doorbell, MegasasState),
+ VMSTATE_UINT32(fw_state, MegasasState),
+ VMSTATE_UINT32(intr_mask, MegasasState),
+ VMSTATE_UINT32(doorbell, MegasasState),
VMSTATE_UINT64(reply_queue_pa, MegasasState),
VMSTATE_UINT64(consumer_pa, MegasasState),
VMSTATE_UINT64(producer_pa, MegasasState),
@@ -2278,9 +2278,9 @@ static const VMStateDescription vmstate_megasas_gen2 = {
VMSTATE_PCI_DEVICE(parent_obj, MegasasState),
VMSTATE_MSIX(parent_obj, MegasasState),
- VMSTATE_INT32(fw_state, MegasasState),
- VMSTATE_INT32(intr_mask, MegasasState),
- VMSTATE_INT32(doorbell, MegasasState),
+ VMSTATE_UINT32(fw_state, MegasasState),
+ VMSTATE_UINT32(intr_mask, MegasasState),
+ VMSTATE_UINT32(doorbell, MegasasState),
VMSTATE_UINT64(reply_queue_pa, MegasasState),
VMSTATE_UINT64(consumer_pa, MegasasState),
VMSTATE_UINT64(producer_pa, MegasasState),
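Besides the signed-to-unsigned type changes (which keep the VMSTATE field macros consistent with the guest-visible register widths), the frame lookup loop now bounds the guest-influenced start index. A simplified sketch of that hardened search; the MEGASAS_MAX_FRAMES value standing in for the frames[] array size is an assumption:

#include <stdint.h>
#include <stddef.h>

#define MEGASAS_MAX_FRAMES 2048 /* hypothetical array size */

struct frame { uint64_t pa; };

static struct frame *lookup_frame(struct frame *frames, uint16_t head,
                                  uint32_t fw_cmds, uint64_t pa)
{
    uint32_t index = head, num = 0;

    /* Both bounds matter: fw_cmds limits the walk length, while the
     * array bound guards a corrupt (guest-influenced) head index. */
    while (num < fw_cmds && index < MEGASAS_MAX_FRAMES) {
        if (frames[index].pa && frames[index].pa == pa) {
            return &frames[index];
        }
        index = (index + 1) % fw_cmds;
        num++;
    }
    return NULL;
}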
diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
index cbb5d97599..f2e524438a 100644
--- a/hw/scsi/vhost-user-scsi.c
+++ b/hw/scsi/vhost-user-scsi.c
@@ -18,7 +18,6 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "qom/object.h"
#include "hw/fw-path-provider.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
diff --git a/hw/usb/hcd-musb.c b/hw/usb/hcd-musb.c
index c29fbef6fc..85f5ff5bd4 100644
--- a/hw/usb/hcd-musb.c
+++ b/hw/usb/hcd-musb.c
@@ -23,6 +23,7 @@
#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "hw/usb.h"
+#include "hw/usb/hcd-musb.h"
#include "hw/irq.h"
#include "hw/hw.h"
@@ -1539,13 +1540,13 @@ static void musb_writew(void *opaque, hwaddr addr, uint32_t value)
};
}
-CPUReadMemoryFunc * const musb_read[] = {
+MUSBReadFunc * const musb_read[] = {
musb_readb,
musb_readh,
musb_readw,
};
-CPUWriteMemoryFunc * const musb_write[] = {
+MUSBWriteFunc * const musb_write[] = {
musb_writeb,
musb_writeh,
musb_writew,
diff --git a/hw/usb/tusb6010.c b/hw/usb/tusb6010.c
index 17580876c6..27eb28d3e4 100644
--- a/hw/usb/tusb6010.c
+++ b/hw/usb/tusb6010.c
@@ -23,6 +23,7 @@
#include "qemu/module.h"
#include "qemu/timer.h"
#include "hw/usb.h"
+#include "hw/usb/hcd-musb.h"
#include "hw/arm/omap.h"
#include "hw/hw.h"
#include "hw/irq.h"
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 342dd6b912..6838bcc4b3 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -115,11 +115,7 @@ static void vfio_intx_eoi(VFIODevice *vbasedev)
static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
{
#ifdef CONFIG_KVM
- struct kvm_irqfd irqfd = {
- .fd = event_notifier_get_fd(&vdev->intx.interrupt),
- .gsi = vdev->intx.route.irq,
- .flags = KVM_IRQFD_FLAG_RESAMPLE,
- };
+ int irq_fd = event_notifier_get_fd(&vdev->intx.interrupt);
Error *err = NULL;
if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
@@ -129,7 +125,7 @@ static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
}
/* Get to a known interrupt state */
- qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
+ qemu_set_fd_handler(irq_fd, NULL, NULL, vdev);
vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
vdev->intx.pending = false;
pci_irq_deassert(&vdev->pdev);
@@ -140,17 +136,18 @@ static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
goto fail;
}
- /* KVM triggers it, VFIO listens for it */
- irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);
-
- if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
+ if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
+ &vdev->intx.interrupt,
+ &vdev->intx.unmask,
+ vdev->intx.route.irq)) {
error_setg_errno(errp, errno, "failed to setup resample irqfd");
goto fail_irqfd;
}
if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
VFIO_IRQ_SET_ACTION_UNMASK,
- irqfd.resamplefd, &err)) {
+ event_notifier_get_fd(&vdev->intx.unmask),
+ &err)) {
error_propagate(errp, err);
goto fail_vfio;
}
@@ -165,12 +162,12 @@ static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
return;
fail_vfio:
- irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
- kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
+ kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
+ vdev->intx.route.irq);
fail_irqfd:
event_notifier_cleanup(&vdev->intx.unmask);
fail:
- qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
+ qemu_set_fd_handler(irq_fd, vfio_intx_interrupt, NULL, vdev);
vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}
@@ -178,12 +175,6 @@ fail:
static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
- struct kvm_irqfd irqfd = {
- .fd = event_notifier_get_fd(&vdev->intx.interrupt),
- .gsi = vdev->intx.route.irq,
- .flags = KVM_IRQFD_FLAG_DEASSIGN,
- };
-
if (!vdev->intx.kvm_accel) {
return;
}
@@ -197,7 +188,8 @@ static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
pci_irq_deassert(&vdev->pdev);
/* Tell KVM to stop listening for an INTx irqfd */
- if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
+ if (kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
+ vdev->intx.route.irq)) {
error_report("vfio: Error: Failed to disable INTx irqfd: %m");
}
@@ -205,7 +197,8 @@ static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
event_notifier_cleanup(&vdev->intx.unmask);
/* QEMU starts listening for interrupt events. */
- qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
+ qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
+ vfio_intx_interrupt, NULL, vdev);
vdev->intx.kvm_accel = false;
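The rewrite above replaces hand-rolled KVM_IRQFD ioctls with the generic kvm_irqchip helpers, which bundle the trigger and resample notifiers in a single call. A hypothetical wrapper showing the pairing, using only the helper signatures already exercised by this patch (QEMU-internal headers assumed):

#include "qemu/osdep.h"
#include "qemu/event_notifier.h"
#include "sysemu/kvm.h"

static int attach_level_irqfd(EventNotifier *trigger, EventNotifier *resample,
                              int virq)
{
    /* KVM injects @virq when @trigger fires, and signals @resample on
     * guest EOI so userspace can unmask the device interrupt. */
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, trigger,
                                              resample, virq);
}

static void detach_level_irqfd(EventNotifier *trigger, int virq)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, trigger, virq);
}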
diff --git a/hw/xen/Makefile.objs b/hw/xen/Makefile.objs
index 84df60a928..3fc715e595 100644
--- a/hw/xen/Makefile.objs
+++ b/hw/xen/Makefile.objs
@@ -1,6 +1,7 @@
# xen backend driver support
-common-obj-$(CONFIG_XEN) += xen-legacy-backend.o xen_devconfig.o xen_pvdev.o xen-common.o xen-bus.o xen-bus-helper.o xen-backend.o
+common-obj-y += xen-legacy-backend.o xen_devconfig.o xen_pvdev.o xen-bus.o xen-bus-helper.o xen-backend.o
obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen-host-pci-device.o
obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen_pt.o xen_pt_config_init.o xen_pt_graphics.o xen_pt_msi.o
obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen_pt_load_rom.o
+obj-$(call $(lnot, $(CONFIG_XEN_PCI_PASSTHROUGH))) += xen_pt_stub.o
diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c
index 81d5ad8da7..ab84443d5e 100644
--- a/hw/xen/xen_pt.c
+++ b/hw/xen/xen_pt.c
@@ -65,7 +65,17 @@
#include "qemu/range.h"
#include "exec/address-spaces.h"
-bool has_igd_gfx_passthru;
+static bool has_igd_gfx_passthru;
+
+bool xen_igd_gfx_pt_enabled(void)
+{
+ return has_igd_gfx_passthru;
+}
+
+void xen_igd_gfx_pt_set(bool value, Error **errp)
+{
+ has_igd_gfx_passthru = value;
+}
#define XEN_PT_NR_IRQS (256)
static uint8_t xen_pt_mapped_machine_irq[XEN_PT_NR_IRQS] = {0};
diff --git a/hw/xen/xen_pt.h b/hw/xen/xen_pt.h
index 179775db7b..6e9cec95f3 100644
--- a/hw/xen/xen_pt.h
+++ b/hw/xen/xen_pt.h
@@ -5,6 +5,9 @@
#include "hw/pci/pci.h"
#include "xen-host-pci-device.h"
+bool xen_igd_gfx_pt_enabled(void);
+void xen_igd_gfx_pt_set(bool value, Error **errp);
+
void xen_pt_log(const PCIDevice *d, const char *f, ...) GCC_FMT_ATTR(2, 3);
#define XEN_PT_ERR(d, _f, _a...) xen_pt_log(d, "%s: Error: "_f, __func__, ##_a)
@@ -322,10 +325,9 @@ extern void *pci_assign_dev_load_option_rom(PCIDevice *dev,
unsigned int domain,
unsigned int bus, unsigned int slot,
unsigned int function);
-extern bool has_igd_gfx_passthru;
static inline bool is_igd_vga_passthrough(XenHostPCIDevice *dev)
{
- return (has_igd_gfx_passthru
+ return (xen_igd_gfx_pt_enabled()
&& ((dev->class_code >> 0x8) == PCI_CLASS_DISPLAY_VGA));
}
int xen_pt_register_vga_regions(XenHostPCIDevice *dev);
diff --git a/hw/xen/xen_pt_stub.c b/hw/xen/xen_pt_stub.c
new file mode 100644
index 0000000000..2d8cac8d54
--- /dev/null
+++ b/hw/xen/xen_pt_stub.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2020 Citrix Systems UK Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/xen/xen_pt.h"
+#include "qapi/error.h"
+
+bool xen_igd_gfx_pt_enabled(void)
+{
+ return false;
+}
+
+void xen_igd_gfx_pt_set(bool value, Error **errp)
+{
+ if (value) {
+ error_setg(errp, "Xen PCI passthrough support not built in");
+ }
+}
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index d14374bdd4..fb4e8a8e29 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -413,6 +413,7 @@ void dump_exec_info(void);
void dump_opcount_info(void);
#endif /* !CONFIG_USER_ONLY */
+/* Returns: 0 on success, -1 on error */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
void *ptr, target_ulong len, bool is_write);
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index b47e5630e7..d5e285d2b5 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -43,9 +43,6 @@ extern ram_addr_t ram_size;
/* memory API */
-typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value);
-typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);
-
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
ram_addr_t qemu_ram_addr_from_host(void *ptr);
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 3e00cdbbfa..7207025bd4 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -50,12 +50,6 @@
extern bool global_dirty_log;
typedef struct MemoryRegionOps MemoryRegionOps;
-typedef struct MemoryRegionMmio MemoryRegionMmio;
-
-struct MemoryRegionMmio {
- CPUReadMemoryFunc *read[3];
- CPUWriteMemoryFunc *write[3];
-};
typedef struct IOMMUTLBEntry IOMMUTLBEntry;
@@ -1984,7 +1978,7 @@ void memory_global_dirty_log_start(void);
*/
void memory_global_dirty_log_stop(void);
-void mtree_info(bool flatview, bool dispatch_tree, bool owner);
+void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
/**
* memory_region_dispatch_read: perform a read directly to the specified
@@ -2314,7 +2308,8 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
/* address_space_map: map a physical memory region into a host virtual address
*
* May map a subset of the requested range, given by and returned in @plen.
- * May return %NULL if resources needed to perform the mapping are exhausted.
+ * May return %NULL and set *@plen to zero (0) if resources needed to
+ * perform the mapping are exhausted.
* Use only for reads OR writes - not for read-modify-write operations.
* Use cpu_register_map_client() to know when retrying the map operation is
* likely to succeed.
@@ -2354,10 +2349,11 @@ void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
/* Internal functions, part of the implementation of address_space_read_cached
* and address_space_write_cached. */
-void address_space_read_cached_slow(MemoryRegionCache *cache,
- hwaddr addr, void *buf, hwaddr len);
-void address_space_write_cached_slow(MemoryRegionCache *cache,
- hwaddr addr, const void *buf, hwaddr len);
+MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
+ hwaddr addr, void *buf, hwaddr len);
+MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
+ hwaddr addr, const void *buf,
+ hwaddr len);
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
@@ -2422,15 +2418,16 @@ MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
* @buf: buffer with the data transferred
* @len: length of the data transferred
*/
-static inline void
+static inline MemTxResult
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
void *buf, hwaddr len)
{
assert(addr < cache->len && len <= cache->len - addr);
if (likely(cache->ptr)) {
memcpy(buf, cache->ptr + addr, len);
+ return MEMTX_OK;
} else {
- address_space_read_cached_slow(cache, addr, buf, len);
+ return address_space_read_cached_slow(cache, addr, buf, len);
}
}
@@ -2442,15 +2439,16 @@ address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
* @buf: buffer with the data transferred
* @len: length of the data transferred
*/
-static inline void
+static inline MemTxResult
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
const void *buf, hwaddr len)
{
assert(addr < cache->len && len <= cache->len - addr);
if (likely(cache->ptr)) {
memcpy(cache->ptr + addr, buf, len);
+ return MEMTX_OK;
} else {
- address_space_write_cached_slow(cache, addr, buf, len);
+ return address_space_write_cached_slow(cache, addr, buf, len);
}
}
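With address_space_read_cached()/address_space_write_cached() now returning MemTxResult, callers can propagate I/O errors instead of silently consuming stale data. A hypothetical caller sketch (QEMU-internal headers assumed):

#include "qemu/osdep.h"
#include "exec/memory.h"

static bool read_descriptor(MemoryRegionCache *cache, hwaddr off,
                            void *desc, hwaddr len)
{
    MemTxResult res = address_space_read_cached(cache, off, desc, len);

    if (res != MEMTX_OK) {
        /* e.g. mark the device broken / raise an error interrupt */
        return false;
    }
    return true;
}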
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index b295f6a784..7b5c24e928 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -21,7 +21,7 @@
#ifndef CONFIG_USER_ONLY
#include "cpu.h"
-#include "hw/xen/xen.h"
+#include "sysemu/xen.h"
#include "sysemu/tcg.h"
#include "exec/ramlist.h"
#include "exec/ramblock.h"
diff --git a/include/hw/display/edid.h b/include/hw/display/edid.h
index 23371ee82c..5b1de57f24 100644
--- a/include/hw/display/edid.h
+++ b/include/hw/display/edid.h
@@ -1,9 +1,6 @@
#ifndef EDID_H
#define EDID_H
-#include "qom/object.h"
-#include "hw/qdev-properties.h"
-
typedef struct qemu_edid_info {
const char *vendor; /* http://www.uefi.org/pnp_id_list */
const char *name;
diff --git a/include/hw/elf_ops.h b/include/hw/elf_ops.h
index 398a4a2c85..6fdff3dced 100644
--- a/include/hw/elf_ops.h
+++ b/include/hw/elf_ops.h
@@ -553,9 +553,14 @@ static int glue(load_elf, SZ)(const char *name, int fd,
rom_add_elf_program(label, mapped_file, data, file_size,
mem_size, addr, as);
} else {
- address_space_write(as ? as : &address_space_memory,
- addr, MEMTXATTRS_UNSPECIFIED,
- data, file_size);
+ MemTxResult res;
+
+ res = address_space_write(as ? as : &address_space_memory,
+ addr, MEMTXATTRS_UNSPECIFIED,
+ data, file_size);
+ if (res != MEMTX_OK) {
+ goto fail;
+ }
}
}
diff --git a/include/hw/hyperv/hyperv.h b/include/hw/hyperv/hyperv.h
index 597381cb01..a63ee0003c 100644
--- a/include/hw/hyperv/hyperv.h
+++ b/include/hw/hyperv/hyperv.h
@@ -79,5 +79,6 @@ void hyperv_synic_add(CPUState *cs);
void hyperv_synic_reset(CPUState *cs);
void hyperv_synic_update(CPUState *cs, bool enable,
hwaddr msg_page_addr, hwaddr event_page_addr);
+bool hyperv_is_synic_enabled(void);
#endif
diff --git a/include/hw/hyperv/vmbus-bridge.h b/include/hw/hyperv/vmbus-bridge.h
new file mode 100644
index 0000000000..c0a06d832c
--- /dev/null
+++ b/include/hw/hyperv/vmbus-bridge.h
@@ -0,0 +1,35 @@
+/*
+ * QEMU Hyper-V VMBus root bridge
+ *
+ * Copyright (c) 2017-2018 Virtuozzo International GmbH.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_HYPERV_VMBUS_BRIDGE_H
+#define HW_HYPERV_VMBUS_BRIDGE_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_VMBUS_BRIDGE "vmbus-bridge"
+
+typedef struct VMBus VMBus;
+
+typedef struct VMBusBridge {
+ SysBusDevice parent_obj;
+
+ uint8_t irq0;
+ uint8_t irq1;
+
+ VMBus *bus;
+} VMBusBridge;
+
+#define VMBUS_BRIDGE(obj) OBJECT_CHECK(VMBusBridge, (obj), TYPE_VMBUS_BRIDGE)
+
+static inline VMBusBridge *vmbus_bridge_find(void)
+{
+ return VMBUS_BRIDGE(object_resolve_path_type("", TYPE_VMBUS_BRIDGE, NULL));
+}
+
+#endif
diff --git a/include/hw/hyperv/vmbus-proto.h b/include/hw/hyperv/vmbus-proto.h
new file mode 100644
index 0000000000..4628d3b323
--- /dev/null
+++ b/include/hw/hyperv/vmbus-proto.h
@@ -0,0 +1,222 @@
+/*
+ * QEMU Hyper-V VMBus support
+ *
+ * Copyright (c) 2017-2018 Virtuozzo International GmbH.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_HYPERV_VMBUS_PROTO_H
+#define HW_HYPERV_VMBUS_PROTO_H
+
+#define VMBUS_VERSION_WS2008 ((0 << 16) | (13))
+#define VMBUS_VERSION_WIN7 ((1 << 16) | (1))
+#define VMBUS_VERSION_WIN8 ((2 << 16) | (4))
+#define VMBUS_VERSION_WIN8_1 ((3 << 16) | (0))
+#define VMBUS_VERSION_WIN10 ((4 << 16) | (0))
+#define VMBUS_VERSION_INVAL -1
+#define VMBUS_VERSION_CURRENT VMBUS_VERSION_WIN10
+
+#define VMBUS_MESSAGE_CONNECTION_ID 1
+#define VMBUS_EVENT_CONNECTION_ID 2
+#define VMBUS_MONITOR_CONNECTION_ID 3
+#define VMBUS_SINT 2
+
+#define VMBUS_MSG_INVALID 0
+#define VMBUS_MSG_OFFERCHANNEL 1
+#define VMBUS_MSG_RESCIND_CHANNELOFFER 2
+#define VMBUS_MSG_REQUESTOFFERS 3
+#define VMBUS_MSG_ALLOFFERS_DELIVERED 4
+#define VMBUS_MSG_OPENCHANNEL 5
+#define VMBUS_MSG_OPENCHANNEL_RESULT 6
+#define VMBUS_MSG_CLOSECHANNEL 7
+#define VMBUS_MSG_GPADL_HEADER 8
+#define VMBUS_MSG_GPADL_BODY 9
+#define VMBUS_MSG_GPADL_CREATED 10
+#define VMBUS_MSG_GPADL_TEARDOWN 11
+#define VMBUS_MSG_GPADL_TORNDOWN 12
+#define VMBUS_MSG_RELID_RELEASED 13
+#define VMBUS_MSG_INITIATE_CONTACT 14
+#define VMBUS_MSG_VERSION_RESPONSE 15
+#define VMBUS_MSG_UNLOAD 16
+#define VMBUS_MSG_UNLOAD_RESPONSE 17
+#define VMBUS_MSG_COUNT 18
+
+#define VMBUS_MESSAGE_SIZE_ALIGN sizeof(uint64_t)
+
+#define VMBUS_PACKET_INVALID 0x0
+#define VMBUS_PACKET_SYNCH 0x1
+#define VMBUS_PACKET_ADD_XFER_PAGESET 0x2
+#define VMBUS_PACKET_RM_XFER_PAGESET 0x3
+#define VMBUS_PACKET_ESTABLISH_GPADL 0x4
+#define VMBUS_PACKET_TEARDOWN_GPADL 0x5
+#define VMBUS_PACKET_DATA_INBAND 0x6
+#define VMBUS_PACKET_DATA_USING_XFER_PAGES 0x7
+#define VMBUS_PACKET_DATA_USING_GPADL 0x8
+#define VMBUS_PACKET_DATA_USING_GPA_DIRECT 0x9
+#define VMBUS_PACKET_CANCEL_REQUEST 0xa
+#define VMBUS_PACKET_COMP 0xb
+#define VMBUS_PACKET_DATA_USING_ADDITIONAL_PKT 0xc
+#define VMBUS_PACKET_ADDITIONAL_DATA 0xd
+
+#define VMBUS_CHANNEL_USER_DATA_SIZE 120
+
+#define VMBUS_OFFER_MONITOR_ALLOCATED 0x1
+#define VMBUS_OFFER_INTERRUPT_DEDICATED 0x1
+
+#define VMBUS_RING_BUFFER_FEAT_PENDING_SZ (1ul << 0)
+
+#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 0x1
+#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 0x2
+#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 0x4
+#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
+#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
+#define VMBUS_CHANNEL_PARENT_OFFER 0x200
+#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
+#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
+
+#define VMBUS_PACKET_FLAG_REQUEST_COMPLETION 1
+
+typedef struct vmbus_message_header {
+ uint32_t message_type;
+ uint32_t _padding;
+} vmbus_message_header;
+
+typedef struct vmbus_message_initiate_contact {
+ vmbus_message_header header;
+ uint32_t version_requested;
+ uint32_t target_vcpu;
+ uint64_t interrupt_page;
+ uint64_t monitor_page1;
+ uint64_t monitor_page2;
+} vmbus_message_initiate_contact;
+
+typedef struct vmbus_message_version_response {
+ vmbus_message_header header;
+ uint8_t version_supported;
+ uint8_t status;
+} vmbus_message_version_response;
+
+typedef struct vmbus_message_offer_channel {
+ vmbus_message_header header;
+ uint8_t type_uuid[16];
+ uint8_t instance_uuid[16];
+ uint64_t _reserved1;
+ uint64_t _reserved2;
+ uint16_t channel_flags;
+ uint16_t mmio_size_mb;
+ uint8_t user_data[VMBUS_CHANNEL_USER_DATA_SIZE];
+ uint16_t sub_channel_index;
+ uint16_t _reserved3;
+ uint32_t child_relid;
+ uint8_t monitor_id;
+ uint8_t monitor_flags;
+ uint16_t interrupt_flags;
+ uint32_t connection_id;
+} vmbus_message_offer_channel;
+
+typedef struct vmbus_message_rescind_channel_offer {
+ vmbus_message_header header;
+ uint32_t child_relid;
+} vmbus_message_rescind_channel_offer;
+
+typedef struct vmbus_gpa_range {
+ uint32_t byte_count;
+ uint32_t byte_offset;
+ uint64_t pfn_array[];
+} vmbus_gpa_range;
+
+typedef struct vmbus_message_gpadl_header {
+ vmbus_message_header header;
+ uint32_t child_relid;
+ uint32_t gpadl_id;
+ uint16_t range_buflen;
+ uint16_t rangecount;
+ vmbus_gpa_range range[];
+} QEMU_PACKED vmbus_message_gpadl_header;
+
+typedef struct vmbus_message_gpadl_body {
+ vmbus_message_header header;
+ uint32_t message_number;
+ uint32_t gpadl_id;
+ uint64_t pfn_array[];
+} vmbus_message_gpadl_body;
+
+typedef struct vmbus_message_gpadl_created {
+ vmbus_message_header header;
+ uint32_t child_relid;
+ uint32_t gpadl_id;
+ uint32_t status;
+} vmbus_message_gpadl_created;
+
+typedef struct vmbus_message_gpadl_teardown {
+ vmbus_message_header header;
+ uint32_t child_relid;
+ uint32_t gpadl_id;
+} vmbus_message_gpadl_teardown;
+
+typedef struct vmbus_message_gpadl_torndown {
+ vmbus_message_header header;
+ uint32_t gpadl_id;
+} vmbus_message_gpadl_torndown;
+
+typedef struct vmbus_message_open_channel {
+ vmbus_message_header header;
+ uint32_t child_relid;
+ uint32_t open_id;
+ uint32_t ring_buffer_gpadl_id;
+ uint32_t target_vp;
+ uint32_t ring_buffer_offset;
+ uint8_t user_data[VMBUS_CHANNEL_USER_DATA_SIZE];
+} vmbus_message_open_channel;
+
+typedef struct vmbus_message_open_result {
+ vmbus_message_header header;
+ uint32_t child_relid;
+ uint32_t open_id;
+ uint32_t status;
+} vmbus_message_open_result;
+
+typedef struct vmbus_message_close_channel {
+ vmbus_message_header header;
+ uint32_t child_relid;
+} vmbus_message_close_channel;
+
+typedef struct vmbus_ring_buffer {
+ uint32_t write_index;
+ uint32_t read_index;
+ uint32_t interrupt_mask;
+ uint32_t pending_send_sz;
+ uint32_t _reserved1[12];
+ uint32_t feature_bits;
+} vmbus_ring_buffer;
+
+typedef struct vmbus_packet_hdr {
+ uint16_t type;
+ uint16_t offset_qwords;
+ uint16_t len_qwords;
+ uint16_t flags;
+ uint64_t transaction_id;
+} vmbus_packet_hdr;
+
+typedef struct vmbus_pkt_gpa_direct {
+ uint32_t _reserved;
+ uint32_t rangecount;
+ vmbus_gpa_range range[];
+} vmbus_pkt_gpa_direct;
+
+typedef struct vmbus_xferpg_range {
+ uint32_t byte_count;
+ uint32_t byte_offset;
+} vmbus_xferpg_range;
+
+typedef struct vmbus_pkt_xferpg {
+ uint16_t buffer_id;
+ uint8_t sender_owns_set;
+ uint8_t _reserved;
+ uint32_t rangecount;
+ vmbus_xferpg_range range[];
+} vmbus_pkt_xferpg;
+
+#endif
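The vmbus_ring_buffer read/write indices are byte offsets into the ring's data area that wrap around. A hedged sketch of the usual occupancy arithmetic; keeping one byte of slack so that "full" differs from "empty" is a common ring-buffer convention assumed here, not something taken from the protocol headers above:

#include <stdint.h>

static inline uint32_t ring_used(uint32_t rd, uint32_t wr, uint32_t size)
{
    return (wr >= rd) ? (wr - rd) : (size - rd + wr);
}

static inline uint32_t ring_free(uint32_t rd, uint32_t wr, uint32_t size)
{
    /* one byte of slack keeps a full ring distinguishable from empty */
    return size - ring_used(rd, wr, size) - 1;
}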
diff --git a/include/hw/hyperv/vmbus.h b/include/hw/hyperv/vmbus.h
new file mode 100644
index 0000000000..40e8417eec
--- /dev/null
+++ b/include/hw/hyperv/vmbus.h
@@ -0,0 +1,230 @@
+/*
+ * QEMU Hyper-V VMBus
+ *
+ * Copyright (c) 2017-2018 Virtuozzo International GmbH.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_HYPERV_VMBUS_H
+#define HW_HYPERV_VMBUS_H
+
+#include "sysemu/sysemu.h"
+#include "sysemu/dma.h"
+#include "hw/qdev-core.h"
+#include "migration/vmstate.h"
+#include "hw/hyperv/vmbus-proto.h"
+#include "qemu/uuid.h"
+
+#define TYPE_VMBUS_DEVICE "vmbus-dev"
+
+#define VMBUS_DEVICE(obj) \
+ OBJECT_CHECK(VMBusDevice, (obj), TYPE_VMBUS_DEVICE)
+#define VMBUS_DEVICE_CLASS(klass) \
+ OBJECT_CLASS_CHECK(VMBusDeviceClass, (klass), TYPE_VMBUS_DEVICE)
+#define VMBUS_DEVICE_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(VMBusDeviceClass, (obj), TYPE_VMBUS_DEVICE)
+
+/*
+ * Object wrapping a GPADL -- GPA Descriptor List -- an array of guest physical
+ * pages, to be used for various buffers shared between the host and the guest.
+ */
+typedef struct VMBusGpadl VMBusGpadl;
+/*
+ * VMBus channel -- a pair of ring buffers for either direction, placed within
+ * one GPADL, and the associated notification means.
+ */
+typedef struct VMBusChannel VMBusChannel;
+/*
+ * Base class for VMBus devices. Includes one or more channels. Identified by
+ * class GUID and instance GUID.
+ */
+typedef struct VMBusDevice VMBusDevice;
+
+typedef void(*VMBusChannelNotifyCb)(struct VMBusChannel *chan);
+
+typedef struct VMBusDeviceClass {
+ DeviceClass parent;
+
+ QemuUUID classid;
+ QemuUUID instanceid; /* Fixed UUID for singleton devices */
+ uint16_t channel_flags;
+ uint16_t mmio_size_mb;
+
+ /* Extensions to standard device callbacks */
+ void (*vmdev_realize)(VMBusDevice *vdev, Error **errp);
+ void (*vmdev_unrealize)(VMBusDevice *vdev);
+ void (*vmdev_reset)(VMBusDevice *vdev);
+ /*
+ * Calculate the number of channels based on the device properties. Called
+ * at realize time.
+ */
+ uint16_t (*num_channels)(VMBusDevice *vdev);
+ /*
+ * Device-specific actions to complete the otherwise successful process of
+ * opening a channel.
+ * Return 0 on success, -errno on failure.
+ */
+ int (*open_channel)(VMBusChannel *chan);
+ /*
+ * Device-specific actions to perform before closing a channel.
+ */
+ void (*close_channel)(VMBusChannel *chan);
+ /*
+ * Main device worker; invoked in response to notifications from either
+ * side, when there's work to do with the data in the channel ring buffers.
+ */
+ VMBusChannelNotifyCb chan_notify_cb;
+} VMBusDeviceClass;
+
+struct VMBusDevice {
+ DeviceState parent;
+ QemuUUID instanceid;
+ uint16_t num_channels;
+ VMBusChannel *channels;
+ AddressSpace *dma_as;
+};
+
+extern const VMStateDescription vmstate_vmbus_dev;
+
+/*
+ * A unit of work parsed out of a message in the receive (i.e. guest->host)
+ * ring buffer of a channel. It's supposed to be subclassed (through
+ * embedding) by the specific devices.
+ */
+typedef struct VMBusChanReq {
+ VMBusChannel *chan;
+ uint16_t pkt_type;
+ uint32_t msglen;
+ void *msg;
+ uint64_t transaction_id;
+ bool need_comp;
+ QEMUSGList sgl;
+} VMBusChanReq;
+
+VMBusDevice *vmbus_channel_device(VMBusChannel *chan);
+VMBusChannel *vmbus_device_channel(VMBusDevice *dev, uint32_t chan_idx);
+uint32_t vmbus_channel_idx(VMBusChannel *chan);
+bool vmbus_channel_is_open(VMBusChannel *chan);
+
+/*
+ * Notify (on guest's behalf) the host side of the channel that there's data in
+ * the ringbuffer to process.
+ */
+void vmbus_channel_notify_host(VMBusChannel *chan);
+
+/*
+ * Reserve space for a packet in the send (i.e. host->guest) ringbuffer. If
+ * there isn't enough room, indicate that to the guest, to be notified when it
+ * becomes available.
+ * Return 0 on success, negative errno on failure.
+ * The ringbuffer indices are NOT updated; the requested-space indicator may be.
+ */
+int vmbus_channel_reserve(VMBusChannel *chan,
+ uint32_t desclen, uint32_t msglen);
+
+/*
+ * Send a packet to the guest. The space for the packet MUST be reserved
+ * first.
+ * Return total number of bytes placed in the send ringbuffer on success,
+ * negative errno on failure.
+ * The ringbuffer indices are updated on success, and the guest is signaled if
+ * needed.
+ */
+ssize_t vmbus_channel_send(VMBusChannel *chan, uint16_t pkt_type,
+ void *desc, uint32_t desclen,
+ void *msg, uint32_t msglen,
+ bool need_comp, uint64_t transaction_id);
+
+/*
+ * Prepare to fetch a batch of packets from the receive ring buffer.
+ * Return 0 on success, negative errno on failure.
+ */
+int vmbus_channel_recv_start(VMBusChannel *chan);
+
+/*
+ * Shortcut for a common case of sending a simple completion packet with no
+ * auxiliary descriptors.
+ */
+ssize_t vmbus_channel_send_completion(VMBusChanReq *req,
+ void *msg, uint32_t msglen);
+
+/*
+ * Peek at the receive (i.e. guest->host) ring buffer and extract a unit of
+ * work (a device-specific subclass of VMBusChanReq) from a packet if there's
+ * one.
+ * Return an allocated buffer of @size, with a filled-in VMBusChanReq at
+ * the beginning followed by the message payload, or NULL on failure.
+ * The ringbuffer indices are NOT updated, nor is the private copy of the read
+ * index.
+ */
+void *vmbus_channel_recv_peek(VMBusChannel *chan, uint32_t size);
+
+/*
+ * Update the private copy of the read index once the preceding peek is deemed
+ * successful.
+ * The ringbuffer indices are NOT updated.
+ */
+void vmbus_channel_recv_pop(VMBusChannel *chan);
+
+/*
+ * Propagate the private copy of the read index into the receive ring buffer,
+ * and thus complete the reception of a series of packets. Notify guest if
+ * needed.
+ * Return the number of bytes popped off the receive ring buffer by the
+ * preceding recv_peek/recv_pop calls on success, negative errno on failure.
+ */
+ssize_t vmbus_channel_recv_done(VMBusChannel *chan);
+
+/*
+ * Free the request allocated by vmbus_channel_recv_peek, together with its
+ * fields.
+ */
+void vmbus_free_req(void *req);
+
+/*
+ * Find and reference a GPADL by @gpadl_id.
+ * If not found return NULL.
+ */
+VMBusGpadl *vmbus_get_gpadl(VMBusChannel *chan, uint32_t gpadl_id);
+
+/*
+ * Unreference @gpadl. If the reference count drops to zero, free it.
+ * @gpadl may be NULL, in which case nothing is done.
+ */
+void vmbus_put_gpadl(VMBusGpadl *gpadl);
+
+/*
+ * Calculate total length in bytes of @gpadl.
+ * @gpadl must be valid.
+ */
+uint32_t vmbus_gpadl_len(VMBusGpadl *gpadl);
+
+/*
+ * Copy data from @iov to @gpadl at offset @off.
+ * Return the number of bytes copied, or a negative status on failure.
+ */
+ssize_t vmbus_iov_to_gpadl(VMBusChannel *chan, VMBusGpadl *gpadl, uint32_t off,
+ const struct iovec *iov, size_t iov_cnt);
+
+/*
+ * Map the SGList contained in request @req, at offset @off and no more than
+ * @len bytes, for I/O in direction @dir, and populate @iov with the mapped
+ * iovecs.
+ * Return the number of iovecs mapped, or negative status on failure.
+ */
+int vmbus_map_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov,
+ unsigned iov_cnt, size_t len, size_t off);
+
+/*
+ * Unmap @iov previously mapped with vmbus_map_sgl, marking the number of
+ * bytes @accessed.
+ */
+void vmbus_unmap_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov,
+ unsigned iov_cnt, size_t accessed);
+
+void vmbus_save_req(QEMUFile *f, VMBusChanReq *req);
+void *vmbus_load_req(QEMUFile *f, VMBusDevice *dev, uint32_t size);
+
+#endif
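Putting the channel API together, a device's chan_notify_cb typically drains the guest-to-host ring with the peek/pop/done triplet. A hypothetical worker built only from the declarations above; a real device embeds VMBusChanReq in its own request type, reserves ring space before completing, and checks every return value:

#include "hw/hyperv/vmbus.h"

static void my_dev_notify_cb(VMBusChannel *chan)
{
    if (vmbus_channel_recv_start(chan)) {
        return;
    }
    for (;;) {
        VMBusChanReq *req = vmbus_channel_recv_peek(chan, sizeof(*req));

        if (!req) {
            break;
        }
        vmbus_channel_recv_pop(chan);
        if (req->need_comp) {
            /* assumed acceptable here: an empty completion payload */
            vmbus_channel_send_completion(req, NULL, 0);
        }
        vmbus_free_req(req);
    }
    vmbus_channel_recv_done(chan);
}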
diff --git a/include/hw/i386/vmport.h b/include/hw/i386/vmport.h
new file mode 100644
index 0000000000..c380b9c1f0
--- /dev/null
+++ b/include/hw/i386/vmport.h
@@ -0,0 +1,28 @@
+#ifndef HW_VMPORT_H
+#define HW_VMPORT_H
+
+#include "hw/isa/isa.h"
+
+#define TYPE_VMPORT "vmport"
+typedef uint32_t (VMPortReadFunc)(void *opaque, uint32_t address);
+
+typedef enum {
+ VMPORT_CMD_GETVERSION = 10,
+ VMPORT_CMD_GETBIOSUUID = 19,
+ VMPORT_CMD_GETRAMSIZE = 20,
+ VMPORT_CMD_VMMOUSE_DATA = 39,
+ VMPORT_CMD_VMMOUSE_STATUS = 40,
+ VMPORT_CMD_VMMOUSE_COMMAND = 41,
+ VMPORT_CMD_GETHZ = 45,
+ VMPORT_CMD_GET_VCPU_INFO = 68,
+ VMPORT_ENTRIES
+} VMPortCommand;
+
+static inline void vmport_init(ISABus *bus)
+{
+ isa_create_simple(bus, TYPE_VMPORT);
+}
+
+void vmport_register(VMPortCommand command, VMPortReadFunc *func, void *opaque);
+
+#endif
diff --git a/include/hw/usb.h b/include/hw/usb.h
index 1cf1cd9584..e2128c7c45 100644
--- a/include/hw/usb.h
+++ b/include/hw/usb.h
@@ -474,36 +474,6 @@ bool usb_host_dev_is_scsi_storage(USBDevice *usbdev);
#define VM_USB_HUB_SIZE 8
-/* hw/usb/hdc-musb.c */
-
-enum musb_irq_source_e {
- musb_irq_suspend = 0,
- musb_irq_resume,
- musb_irq_rst_babble,
- musb_irq_sof,
- musb_irq_connect,
- musb_irq_disconnect,
- musb_irq_vbus_request,
- musb_irq_vbus_error,
- musb_irq_rx,
- musb_irq_tx,
- musb_set_vbus,
- musb_set_session,
- /* Add new interrupts here */
- musb_irq_max, /* total number of interrupts defined */
-};
-
-typedef struct MUSBState MUSBState;
-
-extern CPUReadMemoryFunc * const musb_read[];
-extern CPUWriteMemoryFunc * const musb_write[];
-
-MUSBState *musb_init(DeviceState *parent_device, int gpio_base);
-void musb_reset(MUSBState *s);
-uint32_t musb_core_intr_get(MUSBState *s);
-void musb_core_intr_clear(MUSBState *s, uint32_t mask);
-void musb_set_size(MUSBState *s, int epnum, int size, int is_tx);
-
/* usb-bus.c */
#define TYPE_USB_BUS "usb-bus"
diff --git a/include/hw/usb/hcd-musb.h b/include/hw/usb/hcd-musb.h
new file mode 100644
index 0000000000..c874b9f292
--- /dev/null
+++ b/include/hw/usb/hcd-musb.h
@@ -0,0 +1,47 @@
+/*
+ * "Inventra" High-speed Dual-Role Controller (MUSB-HDRC), Mentor Graphics,
+ * USB2.0 OTG compliant core used in various chips.
+ *
+ * Only host-mode and non-DMA accesses are currently supported.
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Written by Andrzej Zaborowski <balrog@zabor.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_USB_MUSB_H
+#define HW_USB_MUSB_H
+
+enum musb_irq_source_e {
+ musb_irq_suspend = 0,
+ musb_irq_resume,
+ musb_irq_rst_babble,
+ musb_irq_sof,
+ musb_irq_connect,
+ musb_irq_disconnect,
+ musb_irq_vbus_request,
+ musb_irq_vbus_error,
+ musb_irq_rx,
+ musb_irq_tx,
+ musb_set_vbus,
+ musb_set_session,
+ /* Add new interrupts here */
+ musb_irq_max /* total number of interrupts defined */
+};
+
+/* TODO convert hcd-musb to QOM/qdev and remove MUSBReadFunc/MUSBWriteFunc */
+typedef void MUSBWriteFunc(void *opaque, hwaddr addr, uint32_t value);
+typedef uint32_t MUSBReadFunc(void *opaque, hwaddr addr);
+extern MUSBReadFunc * const musb_read[];
+extern MUSBWriteFunc * const musb_write[];
+
+typedef struct MUSBState MUSBState;
+
+MUSBState *musb_init(DeviceState *parent_device, int gpio_base);
+void musb_reset(MUSBState *s);
+uint32_t musb_core_intr_get(MUSBState *s);
+void musb_core_intr_clear(MUSBState *s, uint32_t mask);
+void musb_set_size(MUSBState *s, int epnum, int size, int is_tx);
+
+#endif
diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
index 5ac1c6dc55..771dd447f2 100644
--- a/include/hw/xen/xen.h
+++ b/include/hw/xen/xen.h
@@ -20,13 +20,6 @@ extern uint32_t xen_domid;
extern enum xen_mode xen_mode;
extern bool xen_domid_restrict;
-extern bool xen_allowed;
-
-static inline bool xen_enabled(void)
-{
- return xen_allowed;
-}
-
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num);
void xen_piix3_set_irq(void *opaque, int irq_num, int level);
void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len);
@@ -39,10 +32,6 @@ void xenstore_store_pv_console_info(int i, struct Chardev *chr);
void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory);
-void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
- struct MemoryRegion *mr, Error **errp);
-void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length);
-
void xen_register_framebuffer(struct MemoryRegion *mr);
#endif /* QEMU_HW_XEN_H */
diff --git a/include/io/task.h b/include/io/task.h
index 1abbfb8b65..6818dfedd0 100644
--- a/include/io/task.h
+++ b/include/io/task.h
@@ -21,8 +21,6 @@
#ifndef QIO_TASK_H
#define QIO_TASK_H
-#include "qom/object.h"
-
typedef struct QIOTask QIOTask;
typedef void (*QIOTaskFunc)(QIOTask *task,
diff --git a/include/qemu/thread.h b/include/qemu/thread.h
index d22848138e..06c058fb58 100644
--- a/include/qemu/thread.h
+++ b/include/qemu/thread.h
@@ -177,7 +177,7 @@ void qemu_thread_create(QemuThread *thread, const char *name,
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
-void qemu_thread_exit(void *retval);
+void qemu_thread_exit(void *retval) QEMU_NORETURN;
void qemu_thread_naming(bool enable);
struct Notifier;
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index ecf3cde26c..ce4a78b687 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -51,7 +51,6 @@ typedef struct FWCfgIoState FWCfgIoState;
typedef struct FWCfgMemState FWCfgMemState;
typedef struct FWCfgState FWCfgState;
typedef struct HostMemoryBackend HostMemoryBackend;
-typedef struct HVFX86EmulatorState HVFX86EmulatorState;
typedef struct I2CBus I2CBus;
typedef struct I2SCodec I2SCodec;
typedef struct IOMMUMemoryRegion IOMMUMemoryRegion;
@@ -76,6 +75,7 @@ typedef struct NetFilterState NetFilterState;
typedef struct NICInfo NICInfo;
typedef struct NodeInfo NodeInfo;
typedef struct NumaNodeMem NumaNodeMem;
+typedef struct Object Object;
typedef struct ObjectClass ObjectClass;
typedef struct PCIBridge PCIBridge;
typedef struct PCIBus PCIBus;
diff --git a/include/qom/object.h b/include/qom/object.h
index fd453dc8d6..c7c97ead60 100644
--- a/include/qom/object.h
+++ b/include/qom/object.h
@@ -20,8 +20,6 @@
struct TypeImpl;
typedef struct TypeImpl *Type;
-typedef struct Object Object;
-
typedef struct TypeInfo TypeInfo;
typedef struct InterfaceClass InterfaceClass;
diff --git a/include/qom/qom-qobject.h b/include/qom/qom-qobject.h
index 77cd717e3f..82136e6e80 100644
--- a/include/qom/qom-qobject.h
+++ b/include/qom/qom-qobject.h
@@ -13,8 +13,6 @@
#ifndef QEMU_QOM_QOBJECT_H
#define QEMU_QOM_QOBJECT_H
-#include "qom/object.h"
-
/*
* object_property_get_qobject:
* @obj: the object
diff --git a/include/sysemu/accel.h b/include/sysemu/accel.h
index 47e5788530..e08b8ab8fa 100644
--- a/include/sysemu/accel.h
+++ b/include/sysemu/accel.h
@@ -37,10 +37,12 @@ typedef struct AccelClass {
/*< public >*/
const char *name;
+#ifndef CONFIG_USER_ONLY
int (*init_machine)(MachineState *ms);
void (*setup_post)(MachineState *ms, AccelState *accel);
bool (*has_memory)(MachineState *ms, AddressSpace *as,
hwaddr start_addr, hwaddr size);
+#endif
bool *allowed;
/*
* Array of global properties that would be applied when specific
diff --git a/include/sysemu/hvf.h b/include/sysemu/hvf.h
index d211e808e9..5214ed5202 100644
--- a/include/sysemu/hvf.h
+++ b/include/sysemu/hvf.h
@@ -13,89 +13,23 @@
#ifndef HVF_H
#define HVF_H
-#include "cpu.h"
-#include "qemu/bitops.h"
-#include "exec/memory.h"
-#include "sysemu/accel.h"
-
-extern bool hvf_allowed;
#ifdef CONFIG_HVF
-#include <Hypervisor/hv.h>
-#include <Hypervisor/hv_vmx.h>
-#include <Hypervisor/hv_error.h>
-#include "target/i386/cpu.h"
uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
int reg);
+extern bool hvf_allowed;
#define hvf_enabled() (hvf_allowed)
-#else
+#else /* !CONFIG_HVF */
#define hvf_enabled() 0
#define hvf_get_supported_cpuid(func, idx, reg) 0
-#endif
-
-/* hvf_slot flags */
-#define HVF_SLOT_LOG (1 << 0)
-
-typedef struct hvf_slot {
- uint64_t start;
- uint64_t size;
- uint8_t *mem;
- int slot_id;
- uint32_t flags;
- MemoryRegion *region;
-} hvf_slot;
-
-typedef struct hvf_vcpu_caps {
- uint64_t vmx_cap_pinbased;
- uint64_t vmx_cap_procbased;
- uint64_t vmx_cap_procbased2;
- uint64_t vmx_cap_entry;
- uint64_t vmx_cap_exit;
- uint64_t vmx_cap_preemption_timer;
-} hvf_vcpu_caps;
-
-typedef struct HVFState {
- AccelState parent;
- hvf_slot slots[32];
- int num_slots;
-
- hvf_vcpu_caps *hvf_caps;
-} HVFState;
-extern HVFState *hvf_state;
-
-void hvf_set_phys_mem(MemoryRegionSection *, bool);
-void hvf_handle_io(CPUArchState *, uint16_t, void *,
- int, int, int);
-hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
-
-/* Disable HVF if |disable| is 1, otherwise, enable it iff it is supported by
- * the host CPU. Use hvf_enabled() after this to get the result. */
-void hvf_disable(int disable);
-
-/* Returns non-0 if the host CPU supports the VMX "unrestricted guest" feature
- * which allows the virtual CPU to directly run in "real mode". If true, this
- * allows QEMU to run several vCPU threads in parallel (see cpus.c). Otherwise,
- * only a a single TCG thread can run, and it will call HVF to run the current
- * instructions, except in case of "real mode" (paging disabled, typically at
- * boot time), or MMIO operations. */
-
-int hvf_sync_vcpus(void);
+#endif /* !CONFIG_HVF */
int hvf_init_vcpu(CPUState *);
int hvf_vcpu_exec(CPUState *);
-int hvf_smp_cpu_exec(CPUState *);
void hvf_cpu_synchronize_state(CPUState *);
void hvf_cpu_synchronize_post_reset(CPUState *);
void hvf_cpu_synchronize_post_init(CPUState *);
-void _hvf_cpu_synchronize_post_init(CPUState *, run_on_cpu_data);
-
void hvf_vcpu_destroy(CPUState *);
-void hvf_raise_event(CPUState *);
-/* void hvf_reset_vcpu_state(void *opaque); */
void hvf_reset_vcpu(CPUState *);
-void vmx_update_tpr(CPUState *);
-void update_apic_tpr(CPUState *);
-int hvf_put_registers(CPUState *);
-void vmx_clear_int_window_exiting(CPUState *cpu);
#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf")
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index 3b2250471c..b4174d941c 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -554,4 +554,8 @@ int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source);
int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target);
struct ppc_radix_page_info *kvm_get_radix_page_info(void);
int kvm_get_max_memslots(void);
+
+/* Notify resamplefd for EOI of specific interrupts. */
+void kvm_resample_fd_notify(int gsi);
+
#endif
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index 3efccdba7e..4b6a5c459c 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -5,7 +5,6 @@
#include "qemu/timer.h"
#include "qemu/notify.h"
#include "qemu/uuid.h"
-#include "qom/object.h"
/* vl.c */
diff --git a/include/sysemu/tcg.h b/include/sysemu/tcg.h
index 7d116d2e80..d9d3ca8559 100644
--- a/include/sysemu/tcg.h
+++ b/include/sysemu/tcg.h
@@ -8,9 +8,9 @@
#ifndef SYSEMU_TCG_H
#define SYSEMU_TCG_H
-extern bool tcg_allowed;
void tcg_exec_init(unsigned long tb_size);
#ifdef CONFIG_TCG
+extern bool tcg_allowed;
#define tcg_enabled() (tcg_allowed)
#else
#define tcg_enabled() 0
diff --git a/include/sysemu/xen.h b/include/sysemu/xen.h
new file mode 100644
index 0000000000..1ca292715e
--- /dev/null
+++ b/include/sysemu/xen.h
@@ -0,0 +1,38 @@
+/*
+ * QEMU Xen support
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef SYSEMU_XEN_H
+#define SYSEMU_XEN_H
+
+#ifdef CONFIG_XEN
+
+bool xen_enabled(void);
+
+#ifndef CONFIG_USER_ONLY
+void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length);
+void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
+ struct MemoryRegion *mr, Error **errp);
+#endif
+
+#else /* !CONFIG_XEN */
+
+#define xen_enabled() 0
+#ifndef CONFIG_USER_ONLY
+static inline void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
+{
+ /* nothing */
+}
+static inline void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
+ MemoryRegion *mr, Error **errp)
+{
+ g_assert_not_reached();
+}
+#endif
+
+#endif /* CONFIG_XEN */
+
+#endif
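This header follows the same compile-time stub pattern the series applies to tcg_allowed and hvf_allowed: the extern flag and the enabled() macro exist only when the accelerator is configured in, so disabled-accelerator branches compile away. Schematically, for a placeholder accelerator "foo":

/* Placeholder accelerator "foo"; mirrors the tcg/hvf/xen pattern. */
#ifdef CONFIG_FOO
extern bool foo_allowed;
#define foo_enabled() (foo_allowed)
#else
#define foo_enabled() 0 /* dead-code-eliminates foo-only branches */
#endif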
diff --git a/io/task.c b/io/task.c
index 1ae7b86488..53c0bed686 100644
--- a/io/task.c
+++ b/io/task.c
@@ -22,6 +22,7 @@
#include "io/task.h"
#include "qapi/error.h"
#include "qemu/thread.h"
+#include "qom/object.h"
#include "trace.h"
struct QIOTaskThreadData {
diff --git a/memory.c b/memory.c
index 91ceaf9fcf..2f15a4b250 100644
--- a/memory.c
+++ b/memory.c
@@ -2882,7 +2882,7 @@ static void mtree_print_mr_owner(const MemoryRegion *mr)
static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
hwaddr base,
MemoryRegionListHead *alias_print_queue,
- bool owner)
+ bool owner, bool display_disabled)
{
MemoryRegionList *new_ml, *ml, *next_ml;
MemoryRegionListHead submr_print_queue;
@@ -2894,10 +2894,6 @@ static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
return;
}
- for (i = 0; i < level; i++) {
- qemu_printf(MTREE_INDENT);
- }
-
cur_start = base + mr->addr;
cur_end = cur_start + MR_SIZE(mr->size);
@@ -2926,35 +2922,46 @@ static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
ml->mr = mr->alias;
QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
}
- qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
- " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
- "-" TARGET_FMT_plx "%s",
- cur_start, cur_end,
- mr->priority,
- mr->nonvolatile ? "nv-" : "",
- memory_region_type((MemoryRegion *)mr),
- memory_region_name(mr),
- memory_region_name(mr->alias),
- mr->alias_offset,
- mr->alias_offset + MR_SIZE(mr->size),
- mr->enabled ? "" : " [disabled]");
- if (owner) {
- mtree_print_mr_owner(mr);
+ if (mr->enabled || display_disabled) {
+ for (i = 0; i < level; i++) {
+ qemu_printf(MTREE_INDENT);
+ }
+ qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
+ " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
+ "-" TARGET_FMT_plx "%s",
+ cur_start, cur_end,
+ mr->priority,
+ mr->nonvolatile ? "nv-" : "",
+ memory_region_type((MemoryRegion *)mr),
+ memory_region_name(mr),
+ memory_region_name(mr->alias),
+ mr->alias_offset,
+ mr->alias_offset + MR_SIZE(mr->size),
+ mr->enabled ? "" : " [disabled]");
+ if (owner) {
+ mtree_print_mr_owner(mr);
+ }
+ qemu_printf("\n");
}
} else {
- qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
- " (prio %d, %s%s): %s%s",
- cur_start, cur_end,
- mr->priority,
- mr->nonvolatile ? "nv-" : "",
- memory_region_type((MemoryRegion *)mr),
- memory_region_name(mr),
- mr->enabled ? "" : " [disabled]");
- if (owner) {
- mtree_print_mr_owner(mr);
+ if (mr->enabled || display_disabled) {
+ for (i = 0; i < level; i++) {
+ qemu_printf(MTREE_INDENT);
+ }
+ qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
+ " (prio %d, %s%s): %s%s",
+ cur_start, cur_end,
+ mr->priority,
+ mr->nonvolatile ? "nv-" : "",
+ memory_region_type((MemoryRegion *)mr),
+ memory_region_name(mr),
+ mr->enabled ? "" : " [disabled]");
+ if (owner) {
+ mtree_print_mr_owner(mr);
+ }
+ qemu_printf("\n");
}
}
- qemu_printf("\n");
QTAILQ_INIT(&submr_print_queue);
@@ -2977,7 +2984,7 @@ static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
mtree_print_mr(ml->mr, level + 1, cur_start,
- alias_print_queue, owner);
+ alias_print_queue, owner, display_disabled);
}
QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
@@ -3088,7 +3095,7 @@ static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
return true;
}
-void mtree_info(bool flatview, bool dispatch_tree, bool owner)
+void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
{
MemoryRegionListHead ml_head;
MemoryRegionList *ml, *ml2;
@@ -3136,14 +3143,14 @@ void mtree_info(bool flatview, bool dispatch_tree, bool owner)
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
qemu_printf("address-space: %s\n", as->name);
- mtree_print_mr(as->root, 1, 0, &ml_head, owner);
+ mtree_print_mr(as->root, 1, 0, &ml_head, owner, disabled);
qemu_printf("\n");
}
/* print aliased regions */
QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
- mtree_print_mr(ml->mr, 1, 0, &ml_head, owner);
+ mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
qemu_printf("\n");
}
diff --git a/migration/savevm.c b/migration/savevm.c
index c00a6807d9..b979ea6e7f 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -28,7 +28,6 @@
#include "qemu/osdep.h"
#include "hw/boards.h"
-#include "hw/xen/xen.h"
#include "net/net.h"
#include "migration.h"
#include "migration/snapshot.h"
@@ -59,6 +58,7 @@
#include "sysemu/replay.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
+#include "sysemu/xen.h"
#include "qjson.h"
#include "migration/colo.h"
#include "qemu/bitmap.h"
diff --git a/monitor/misc.c b/monitor/misc.c
index f5207cd242..89bb970b00 100644
--- a/monitor/misc.c
+++ b/monitor/misc.c
@@ -957,8 +957,9 @@ static void hmp_info_mtree(Monitor *mon, const QDict *qdict)
bool flatview = qdict_get_try_bool(qdict, "flatview", false);
bool dispatch_tree = qdict_get_try_bool(qdict, "dispatch_tree", false);
bool owner = qdict_get_try_bool(qdict, "owner", false);
+ bool disabled = qdict_get_try_bool(qdict, "disabled", false);
- mtree_info(flatview, dispatch_tree, owner);
+ mtree_info(flatview, dispatch_tree, owner, disabled);
}
#ifdef CONFIG_PROFILER
diff --git a/qom/container.c b/qom/container.c
index 14e7ae485b..455e8410c6 100644
--- a/qom/container.c
+++ b/qom/container.c
@@ -16,7 +16,6 @@
static const TypeInfo container_info = {
.name = "container",
- .instance_size = sizeof(Object),
.parent = TYPE_OBJECT,
};
diff --git a/qom/object.c b/qom/object.c
index d0be42c8d6..b0ed560fd8 100644
--- a/qom/object.c
+++ b/qom/object.c
@@ -262,8 +262,7 @@ static void type_initialize_interface(TypeImpl *ti, TypeImpl *interface_type,
new_iface->concrete_class = ti->class;
new_iface->interface_type = interface_type;
- ti->class->interfaces = g_slist_append(ti->class->interfaces,
- iface_impl->class);
+ ti->class->interfaces = g_slist_append(ti->class->interfaces, new_iface);
}
static void object_property_free(gpointer data)
@@ -316,8 +315,6 @@ static void type_initialize(TypeImpl *ti)
g_assert(parent->instance_size <= ti->instance_size);
memcpy(ti->class, parent->class, parent->class_size);
ti->class->interfaces = NULL;
- ti->class->properties = g_hash_table_new_full(
- g_str_hash, g_str_equal, NULL, object_property_free);
for (e = parent->class->interfaces; e; e = e->next) {
InterfaceClass *iface = e->data;
@@ -347,11 +344,11 @@ static void type_initialize(TypeImpl *ti)
type_initialize_interface(ti, t, t);
}
- } else {
- ti->class->properties = g_hash_table_new_full(
- g_str_hash, g_str_equal, NULL, object_property_free);
}
+ ti->class->properties = g_hash_table_new_full(g_str_hash, g_str_equal, NULL,
+ object_property_free);
+
ti->class->type = ti;
while (parent) {
@@ -497,10 +494,8 @@ static void object_class_property_init_all(Object *obj)
}
}
-static void object_initialize_with_type(void *data, size_t size, TypeImpl *type)
+static void object_initialize_with_type(Object *obj, size_t size, TypeImpl *type)
{
- Object *obj = data;
-
type_initialize(type);
g_assert(type->instance_size >= sizeof(Object));
@@ -1051,7 +1046,10 @@ static int do_object_child_foreach(Object *obj,
break;
}
if (recurse) {
- do_object_child_foreach(child, fn, opaque, true);
+ ret = do_object_child_foreach(child, fn, opaque, true);
+ if (ret != 0) {
+ break;
+ }
}
}
}
@@ -1953,26 +1951,25 @@ Object *object_resolve_path_component(Object *parent, const char *part)
}
static Object *object_resolve_abs_path(Object *parent,
- char **parts,
- const char *typename,
- int index)
+ char **parts,
+ const char *typename)
{
Object *child;
- if (parts[index] == NULL) {
+ if (*parts == NULL) {
return object_dynamic_cast(parent, typename);
}
- if (strcmp(parts[index], "") == 0) {
- return object_resolve_abs_path(parent, parts, typename, index + 1);
+ if (strcmp(*parts, "") == 0) {
+ return object_resolve_abs_path(parent, parts + 1, typename);
}
- child = object_resolve_path_component(parent, parts[index]);
+ child = object_resolve_path_component(parent, *parts);
if (!child) {
return NULL;
}
- return object_resolve_abs_path(child, parts, typename, index + 1);
+ return object_resolve_abs_path(child, parts + 1, typename);
}
static Object *object_resolve_partial_path(Object *parent,
@@ -1984,7 +1981,7 @@ static Object *object_resolve_partial_path(Object *parent,
GHashTableIter iter;
ObjectProperty *prop;
- obj = object_resolve_abs_path(parent, parts, typename, 0);
+ obj = object_resolve_abs_path(parent, parts, typename);
g_hash_table_iter_init(&iter, parent->properties);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *)&prop)) {
@@ -2029,7 +2026,7 @@ Object *object_resolve_path_type(const char *path, const char *typename,
*ambiguousp = ambiguous;
}
} else {
- obj = object_resolve_abs_path(object_get_root(), parts, typename, 1);
+ obj = object_resolve_abs_path(object_get_root(), parts + 1, typename);
}
g_strfreev(parts);
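The object_resolve_abs_path() rewrite above drops the index argument and
instead advances the parts pointer on each recursive call, so every frame
sees its remaining components at *parts. A minimal standalone sketch of the
same pointer-advance pattern (hypothetical names, not the QOM API):

    #include <stdio.h>
    #include <string.h>

    /* Walk a NULL-terminated array of path components, skipping empty
     * ones, by advancing the array pointer instead of carrying an index. */
    static const char *resolve(const char *node, char **parts)
    {
        if (*parts == NULL) {
            return node;                     /* every component consumed */
        }
        if (strcmp(*parts, "") == 0) {
            return resolve(node, parts + 1); /* skip empty component */
        }
        return resolve(*parts, parts + 1);   /* descend into the child */
    }

    int main(void)
    {
        char *parts[] = { "", "machine", "unattached", NULL };
        printf("resolved: %s\n", resolve("/", parts));
        return 0;
    }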
diff --git a/replay/replay-internal.c b/replay/replay-internal.c
index eba8246aae..2e8a3e947a 100644
--- a/replay/replay-internal.c
+++ b/replay/replay-internal.c
@@ -22,6 +22,9 @@
It also protects the replay events queue, which stores events to be
written to or read from the log. */
static QemuMutex lock;
+/* Condition and queue for fair ordering of mutex lock requests. */
+static QemuCond mutex_cond;
+static unsigned long mutex_head, mutex_tail;
/* File for replay writing */
static bool write_error;
@@ -197,9 +200,10 @@ static __thread bool replay_locked;
void replay_mutex_init(void)
{
qemu_mutex_init(&lock);
+ qemu_cond_init(&mutex_cond);
/* Hold the mutex while we start-up */
- qemu_mutex_lock(&lock);
replay_locked = true;
+ ++mutex_tail;
}
bool replay_mutex_locked(void)
@@ -211,10 +215,16 @@ bool replay_mutex_locked(void)
void replay_mutex_lock(void)
{
if (replay_mode != REPLAY_MODE_NONE) {
+ unsigned long id;
g_assert(!qemu_mutex_iothread_locked());
g_assert(!replay_mutex_locked());
qemu_mutex_lock(&lock);
+ id = mutex_tail++;
+ while (id != mutex_head) {
+ qemu_cond_wait(&mutex_cond, &lock);
+ }
replay_locked = true;
+ qemu_mutex_unlock(&lock);
}
}
@@ -222,7 +232,10 @@ void replay_mutex_unlock(void)
{
if (replay_mode != REPLAY_MODE_NONE) {
g_assert(replay_mutex_locked());
+ qemu_mutex_lock(&lock);
+ ++mutex_head;
replay_locked = false;
+ qemu_cond_broadcast(&mutex_cond);
qemu_mutex_unlock(&lock);
}
}
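The mutex_head/mutex_tail counters plus the condition variable turn the
replay mutex into a ticket lock: each acquirer takes the next tail value
and waits until head reaches its ticket, so waiters are served in strict
FIFO order rather than in whatever order the OS wakes them. A minimal
pthreads sketch of the same scheme (illustrative only, not the QEMU code):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static unsigned long head, tail; /* next ticket served / handed out */

    static void ticket_lock(void)
    {
        pthread_mutex_lock(&lock);
        unsigned long ticket = tail++;   /* take a ticket */
        while (ticket != head) {         /* wait for our turn */
            pthread_cond_wait(&cond, &lock);
        }
        pthread_mutex_unlock(&lock);     /* the ticket lock is now held */
    }

    static void ticket_unlock(void)
    {
        pthread_mutex_lock(&lock);
        head++;                          /* serve the next ticket */
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
    }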
diff --git a/replay/replay.c b/replay/replay.c
index 706c7b4f4b..7d93746c73 100644
--- a/replay/replay.c
+++ b/replay/replay.c
@@ -366,6 +366,11 @@ void replay_finish(void)
/* finalize the file */
if (replay_file) {
if (replay_mode == REPLAY_MODE_RECORD) {
+ /*
+ * This can't be done in the signal handler, so add the
+ * shutdown event here to cover the Ctrl-C case.
+ */
+ replay_shutdown_request(SHUTDOWN_CAUSE_HOST_SIGNAL);
/* write end event */
replay_put_event(EVENT_END);
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 0ba213e9f2..2d2e922d89 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1267,7 +1267,7 @@ sub checkfilename {
# files and when changing tests.
if ($name =~ m#^tests/data/acpi/# and not $name =~ m#^\.sh$#) {
$$acpi_testexpected = $name;
- } elsif ($name =~ m#^tests/qtest/bios-tables-test-allowed-diff.h$#) {
+ } elsif ($name !~ m#^tests/qtest/bios-tables-test-allowed-diff.h$#) {
$$acpi_nontestexpected = $name;
}
if (defined $$acpi_testexpected and defined $$acpi_nontestexpected) {
diff --git a/scripts/coverity-scan/coverity-scan.docker b/scripts/coverity-scan/coverity-scan.docker
index ad4d64c0f8..018c03de6d 100644
--- a/scripts/coverity-scan/coverity-scan.docker
+++ b/scripts/coverity-scan/coverity-scan.docker
@@ -125,5 +125,6 @@ RUN dnf install -y $PACKAGES
RUN rpm -q $PACKAGES | sort > /packages.txt
ENV PATH $PATH:/usr/libexec/python3-sphinx/
ENV COVERITY_TOOL_BASE=/coverity-tools
+COPY coverity_tool.tgz coverity_tool.tgz
+RUN mkdir -p /coverity-tools/coverity_tool && cd /coverity-tools/coverity_tool && tar xf /coverity_tool.tgz
COPY run-coverity-scan run-coverity-scan
-RUN --mount=type=secret,id=coverity.token,required ./run-coverity-scan --update-tools-only --tokenfile /run/secrets/coverity.token
diff --git a/scripts/coverity-scan/run-coverity-scan b/scripts/coverity-scan/run-coverity-scan
index 2e067ef5cf..03a791dec9 100755
--- a/scripts/coverity-scan/run-coverity-scan
+++ b/scripts/coverity-scan/run-coverity-scan
@@ -29,8 +29,11 @@
# Command line options:
# --dry-run : run the tools, but don't actually do the upload
-# --docker : create and work inside a docker container
+# --docker : create and work inside a container
+# --docker-engine : specify the container engine to use (docker/podman/auto);
+# implies --docker
# --update-tools-only : update the cached copy of the tools, but don't run them
+# --no-update-tools : do not update the cached copy of the tools
# --tokenfile : file to read Coverity token from
# --version ver : specify version being analyzed (default: ask git)
# --description desc : specify description of this version (default: ask git)
@@ -41,9 +44,10 @@
# is intended mainly for internal use by the Docker support
#
# User-specifiable environment variables:
-# COVERITY_TOKEN -- Coverity token
+# COVERITY_TOKEN -- Coverity token (default: looks at your
+# coverity.token config)
# COVERITY_EMAIL -- the email address to use for uploads (default:
-# looks at your git user.email config)
+# looks at your git coverity.email or user.email config)
# COVERITY_BUILD_CMD -- make command (default: 'make -jN' where N is
# number of CPUs as determined by 'nproc')
# COVERITY_TOOL_BASE -- set to directory to put coverity tools
@@ -58,11 +62,11 @@ check_upload_permissions() {
# with status 1 if the check failed (usually a bad token);
# will exit the script with status 0 if the check indicated that we
# can't upload yet (ie we are at quota)
- # Assumes that PROJTOKEN, PROJNAME and DRYRUN have been initialized.
+ # Assumes that COVERITY_TOKEN, PROJNAME and DRYRUN have been initialized.
echo "Checking upload permissions..."
- if ! up_perm="$(wget https://scan.coverity.com/api/upload_permitted --post-data "token=$PROJTOKEN&project=$PROJNAME" -q -O -)"; then
+ if ! up_perm="$(wget https://scan.coverity.com/api/upload_permitted --post-data "token=$COVERITY_TOKEN&project=$PROJNAME" -q -O -)"; then
echo "Coverity Scan API access denied: bad token?"
exit 1
fi
@@ -91,43 +95,62 @@ check_upload_permissions() {
}
+build_docker_image() {
+ # build docker container including the coverity-scan tools
+ echo "Building docker container..."
+ # TODO: This re-unpacks the tools every time, rather than caching
+ # and reusing the image produced by the COPY of the .tgz file.
+ # Not sure why.
+ tests/docker/docker.py --engine ${DOCKER_ENGINE} build \
+ -t coverity-scanner -f scripts/coverity-scan/coverity-scan.docker \
+ --extra-files scripts/coverity-scan/run-coverity-scan \
+ "$COVERITY_TOOL_BASE"/coverity_tool.tgz
+}
+
update_coverity_tools () {
# Check for whether we need to download the Coverity tools
# (either because we don't have a copy, or because it's out of date)
- # Assumes that COVERITY_TOOL_BASE, PROJTOKEN and PROJNAME are set.
+ # Assumes that COVERITY_TOOL_BASE, COVERITY_TOKEN and PROJNAME are set.
mkdir -p "$COVERITY_TOOL_BASE"
cd "$COVERITY_TOOL_BASE"
echo "Checking for new version of coverity build tools..."
- wget https://scan.coverity.com/download/linux64 --post-data "token=$PROJTOKEN&project=$PROJNAME&md5=1" -O coverity_tool.md5.new
+ wget https://scan.coverity.com/download/linux64 --post-data "token=$COVERITY_TOKEN&project=$PROJNAME&md5=1" -O coverity_tool.md5.new
if ! cmp -s coverity_tool.md5 coverity_tool.md5.new; then
# out of date md5 or no md5: download new build tool
# blow away the old build tool
echo "Downloading coverity build tools..."
rm -rf coverity_tool coverity_tool.tgz
- wget https://scan.coverity.com/download/linux64 --post-data "token=$PROJTOKEN&project=$PROJNAME" -O coverity_tool.tgz
+ wget https://scan.coverity.com/download/linux64 --post-data "token=$COVERITY_TOKEN&project=$PROJNAME" -O coverity_tool.tgz
if ! (cat coverity_tool.md5.new; echo " coverity_tool.tgz") | md5sum -c --status; then
echo "Downloaded tarball didn't match md5sum!"
exit 1
fi
- # extract the new one, keeping it corralled in a 'coverity_tool' directory
- echo "Unpacking coverity build tools..."
- mkdir -p coverity_tool
- cd coverity_tool
- tar xf ../coverity_tool.tgz
- cd ..
- mv coverity_tool.md5.new coverity_tool.md5
- fi
+ if [ "$DOCKER" != yes ]; then
+ # extract the new one, keeping it corralled in a 'coverity_tool' directory
+ fprintf(stderr, "Maximum VCPU number QEMU supports is %d\n", HAX_MAX_VCPU);
+ mkdir -p coverity_tool
+ cd coverity_tool
+ tar xf ../coverity_tool.tgz
+ cd ..
+ mv coverity_tool.md5.new coverity_tool.md5
+ fi
+ fi
rm -f coverity_tool.md5.new
+ cd "$SRCDIR"
+
+ if [ "$DOCKER" = yes ]; then
+ build_docker_image
+ fi
}
# Check user-provided environment variables and arguments
DRYRUN=no
-UPDATE_ONLY=no
+UPDATE=yes
DOCKER=no
while [ "$#" -ge 1 ]; do
@@ -136,9 +159,13 @@ while [ "$#" -ge 1 ]; do
shift
DRYRUN=yes
;;
+ --no-update-tools)
+ shift
+ UPDATE=no
+ ;;
--update-tools-only)
shift
- UPDATE_ONLY=yes
+ UPDATE=only
;;
--version)
shift
@@ -196,6 +223,17 @@ while [ "$#" -ge 1 ]; do
;;
--docker)
DOCKER=yes
+ DOCKER_ENGINE=auto
+ shift
+ ;;
+ --docker-engine)
+ shift
+ if [ $# -eq 0 ]; then
+ echo "--docker-engine needs an argument"
+ exit 1
+ fi
+ DOCKER=yes
+ DOCKER_ENGINE="$1"
shift
;;
*)
@@ -206,6 +244,9 @@ while [ "$#" -ge 1 ]; do
done
if [ -z "$COVERITY_TOKEN" ]; then
+ COVERITY_TOKEN="$(git config coverity.token)"
+fi
+if [ -z "$COVERITY_TOKEN" ]; then
echo "COVERITY_TOKEN environment variable not set"
exit 1
fi
@@ -225,19 +266,19 @@ if [ -z "$SRCDIR" ]; then
SRCDIR="$PWD"
fi
-PROJTOKEN="$COVERITY_TOKEN"
PROJNAME=QEMU
TARBALL=cov-int.tar.xz
-if [ "$UPDATE_ONLY" = yes ] && [ "$DOCKER" = yes ]; then
- echo "Combining --docker and --update-only is not supported"
- exit 1
-fi
-
-if [ "$UPDATE_ONLY" = yes ]; then
+if [ "$UPDATE" = only ]; then
# Just do the tools update; we don't need to check whether
# we are in a source tree or have upload rights for this,
# so do it before some of the command line and source tree checks.
+
+ if [ "$DOCKER" = yes ] && [ ! -z "$SRCTARBALL" ]; then
+ echo "--update-tools-only --docker is incompatible with --src-tarball."
+ exit 1
+ fi
+
update_coverity_tools
exit 0
fi
@@ -269,17 +310,26 @@ if [ -z "$DESCRIPTION" ]; then
fi
if [ -z "$COVERITY_EMAIL" ]; then
+ COVERITY_EMAIL="$(git config coverity.email)"
+fi
+if [ -z "$COVERITY_EMAIL" ]; then
COVERITY_EMAIL="$(git config user.email)"
fi
+# Otherwise, continue with the full build and upload process.
+
+check_upload_permissions
+
+if [ "$UPDATE" != no ]; then
+ update_coverity_tools
+fi
+
# Run ourselves inside docker if that's what the user wants
if [ "$DOCKER" = yes ]; then
- # build docker container including the coverity-scan tools
# Put the Coverity token into a temporary file that only
# we have read access to, and then pass it to docker build
- # using --secret. This requires at least Docker 18.09.
- # Mostly what we are trying to do here is ensure we don't leak
- # the token into the Docker image.
+ # using a volume, which keeps the token from leaking into
+ # the Docker image.
umask 077
SECRETDIR=$(mktemp -d)
if [ -z "$SECRETDIR" ]; then
@@ -290,38 +340,27 @@ if [ "$DOCKER" = yes ]; then
echo "Created temporary directory $SECRETDIR"
SECRET="$SECRETDIR/token"
echo "$COVERITY_TOKEN" > "$SECRET"
- echo "Building docker container..."
- # TODO: This re-downloads the tools every time, rather than
- # caching and reusing the image produced with the downloaded tools.
- # Not sure why.
- # TODO: how do you get 'docker build' to print the output of the
- # commands it is running to its stdout? This would be useful for debug.
- DOCKER_BUILDKIT=1 docker build -t coverity-scanner \
- --secret id=coverity.token,src="$SECRET" \
- -f scripts/coverity-scan/coverity-scan.docker \
- scripts/coverity-scan
echo "Archiving sources to be analyzed..."
./scripts/archive-source.sh "$SECRETDIR/qemu-sources.tgz"
+ ARGS="--no-update-tools"
if [ "$DRYRUN" = yes ]; then
- DRYRUNARG=--dry-run
+ ARGS="$ARGS --dry-run"
fi
echo "Running scanner..."
# If we need to capture the output tarball, get the inner run to
# save it to the secrets directory so we can copy it out before the
# directory is cleaned up.
if [ ! -z "$RESULTSTARBALL" ]; then
- RTARGS="--results-tarball /work/cov-int.tar.xz"
- else
- RTARGS=""
+ ARGS="$ARGS --results-tarball /work/cov-int.tar.xz"
fi
# Arrange for this docker run to get access to the sources with -v.
# We pass through all the configuration from the outer script to the inner.
export COVERITY_EMAIL COVERITY_BUILD_CMD
- docker run -it --env COVERITY_EMAIL --env COVERITY_BUILD_CMD \
+ tests/docker/docker.py run -it --env COVERITY_EMAIL --env COVERITY_BUILD_CMD \
-v "$SECRETDIR:/work" coverity-scanner \
./run-coverity-scan --version "$VERSION" \
- --description "$DESCRIPTION" $DRYRUNARG --tokenfile /work/token \
- --srcdir /qemu --src-tarball /work/qemu-sources.tgz $RTARGS
+ --description "$DESCRIPTION" $ARGS --tokenfile /work/token \
+ --srcdir /qemu --src-tarball /work/qemu-sources.tgz
if [ ! -z "$RESULTSTARBALL" ]; then
echo "Copying results tarball to $RESULTSTARBALL..."
cp "$SECRETDIR/cov-int.tar.xz" "$RESULTSTARBALL"
@@ -330,12 +369,6 @@ if [ "$DOCKER" = yes ]; then
exit 0
fi
-# Otherwise, continue with the full build and upload process.
-
-check_upload_permissions
-
-update_coverity_tools
-
TOOLBIN="$(cd "$COVERITY_TOOL_BASE" && echo $PWD/coverity_tool/cov-analysis-*/bin)"
if ! test -x "$TOOLBIN/cov-build"; then
@@ -393,7 +426,7 @@ if [ "$DRYRUN" = yes ]; then
exit 0
fi
-curl --form token="$PROJTOKEN" --form email="$COVERITY_EMAIL" \
+curl --form token="$COVERITY_TOKEN" --form email="$COVERITY_EMAIL" \
--form file=@"$TARBALL" --form version="$VERSION" \
--form description="$DESCRIPTION" \
https://scan.coverity.com/builds?project="$PROJNAME"
diff --git a/softmmu/vl.c b/softmmu/vl.c
index 05d1a4cb6b..f669c06ede 100644
--- a/softmmu/vl.c
+++ b/softmmu/vl.c
@@ -36,6 +36,7 @@
#include "sysemu/runstate.h"
#include "sysemu/seccomp.h"
#include "sysemu/tcg.h"
+#include "sysemu/xen.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"
@@ -178,7 +179,6 @@ static NotifierList exit_notifiers =
static NotifierList machine_init_done_notifiers =
NOTIFIER_LIST_INITIALIZER(machine_init_done_notifiers);
-bool xen_allowed;
uint32_t xen_domid;
enum xen_mode xen_mode = XEN_EMULATE;
bool xen_domid_restrict;
@@ -4334,12 +4334,13 @@ void qemu_init(int argc, char **argv, char **envp)
parse_numa_opts(current_machine);
+ /* do monitor/qmp handling at preconfig state if requested */
+ qemu_main_loop();
+
if (machine_class->default_ram_id && current_machine->ram_size &&
numa_uses_legacy_mem() && !current_machine->ram_memdev_id) {
create_default_memdev(current_machine, mem_path);
}
- /* do monitor/qmp handling at preconfig state if requested */
- qemu_main_loop();
audio_init_audiodevs();
diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs
index c1e43ac68f..28e48171d1 100644
--- a/stubs/Makefile.objs
+++ b/stubs/Makefile.objs
@@ -49,7 +49,5 @@ stub-obj-y += target-get-monitor-def.o
stub-obj-y += target-monitor-defs.o
stub-obj-y += uuid.o
stub-obj-y += vm-stop.o
-stub-obj-y += xen-common.o
-stub-obj-y += xen-hvm.o
endif # CONFIG_SOFTMMU || CONFIG_TOOLS
diff --git a/stubs/qmp_memory_device.c b/stubs/qmp_memory_device.c
index 85ff8f2d7e..e75cac62dc 100644
--- a/stubs/qmp_memory_device.c
+++ b/stubs/qmp_memory_device.c
@@ -1,5 +1,4 @@
#include "qemu/osdep.h"
-#include "qom/object.h"
#include "hw/mem/memory-device.h"
MemoryDeviceInfoList *qmp_memory_device_list(void)
diff --git a/stubs/xen-common.c b/stubs/xen-common.c
deleted file mode 100644
index f5efcae362..0000000000
--- a/stubs/xen-common.c
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Copyright (C) 2014 Citrix Systems UK Ltd.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#include "qemu/osdep.h"
-#include "hw/xen/xen.h"
-
-void xenstore_store_pv_console_info(int i, Chardev *chr)
-{
-}
diff --git a/target/i386/TODO b/target/i386/TODO
deleted file mode 100644
index a8d69cf87f..0000000000
--- a/target/i386/TODO
+++ /dev/null
@@ -1,31 +0,0 @@
-Correctness issues:
-
-- some eflags manipulation incorrectly reset the bit 0x2.
-- SVM: test, cpu save/restore, SMM save/restore.
-- x86_64: lcall/ljmp intel/amd differences ?
-- better code fetch (different exception handling + CS.limit support)
-- user/kernel PUSHL/POPL in helper.c
-- add missing cpuid tests
-- return UD exception if LOCK prefix incorrectly used
-- test ldt limit < 7 ?
-- fix some 16 bit sp push/pop overflow (pusha/popa, lcall lret)
-- full support of segment limit/rights
-- full x87 exception support
-- improve x87 bit exactness (use bochs code ?)
-- DRx register support
-- CR0.AC emulation
-- SSE alignment checks
-
-Optimizations/Features:
-
-- add SVM nested paging support
-- add VMX support
-- add AVX support
-- add SSE5 support
-- fxsave/fxrstor AMD extensions
-- improve monitor/mwait support
-- faster EFLAGS update: consider SZAP, C, O can be updated separately
- with a bit field in CC_OP and more state variables.
-- evaluate x87 stack pointer statically
-- find a way to avoid translating several time the same TB if CR0.TS
- is set or not.
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 02065e35d4..4fe97f9b41 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -29,6 +29,7 @@
#include "sysemu/reset.h"
#include "sysemu/hvf.h"
#include "sysemu/cpus.h"
+#include "sysemu/xen.h"
#include "kvm_i386.h"
#include "sev_i386.h"
@@ -54,7 +55,6 @@
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
-#include "hw/xen/xen.h"
#include "hw/i386/apic_internal.h"
#include "hw/boards.h"
#endif
@@ -985,7 +985,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.feat_names = {
NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
NULL, NULL, NULL, NULL,
- NULL, NULL, "md-clear", NULL,
+ "avx512-vp2intersect", NULL, "md-clear", NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL /* pconfig */, NULL,
NULL, NULL, NULL, NULL,
@@ -1139,6 +1139,22 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.index = MSR_IA32_CORE_CAPABILITY,
},
},
+ [FEAT_PERF_CAPABILITIES] = {
+ .type = MSR_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, "full-width-write", NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .msr = {
+ .index = MSR_IA32_PERF_CAPABILITIES,
+ },
+ },
[FEAT_VMX_PROCBASED_CTLS] = {
.type = MSR_FEATURE_WORD,
@@ -1317,6 +1333,10 @@ static FeatureDep feature_dependencies[] = {
.to = { FEAT_CORE_CAPABILITY, ~0ull },
},
{
+ .from = { FEAT_1_ECX, CPUID_EXT_PDCM },
+ .to = { FEAT_PERF_CAPABILITIES, ~0ull },
+ },
+ {
.from = { FEAT_1_ECX, CPUID_EXT_VMX },
.to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
},
@@ -5488,6 +5508,9 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ebx |= (cs->nr_cores * cs->nr_threads) << 16;
*edx |= CPUID_HT;
}
+ if (!cpu->enable_pmu) {
+ *ecx &= ~CPUID_EXT_PDCM;
+ }
break;
case 2:
/* cache info: needed for Pentium Pro compatibility */
@@ -5837,11 +5860,20 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*eax = cpu->phys_bits;
}
*ebx = env->features[FEAT_8000_0008_EBX];
- *ecx = 0;
- *edx = 0;
if (cs->nr_cores * cs->nr_threads > 1) {
- *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
+ /*
+ * Bits 15:12 are "The number of bits in the initial
+ * Core::X86::Apic::ApicId[ApicId] value that indicate
+ * thread ID within a package". This is already stored in
+ * CPUX86State::pkg_offset.
+ * Bits 7:0 are "The number of threads in the package is NC+1".
+ */
+ *ecx = (env->pkg_offset << 12) |
+ ((cs->nr_cores * cs->nr_threads) - 1);
+ } else {
+ *ecx = 0;
}
+ *edx = 0;
break;
case 0x8000000A:
if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
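The new CPUID leaf 0x80000008 ECX encoding above packs the APIC-ID width
used for thread IDs into bits 15:12 and the thread count minus one (NC)
into bits 7:0. A standalone illustration with made-up topology values
(pkg_offset and the core/thread counts are example inputs, not defaults):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned nr_cores = 4, nr_threads = 2;   /* 8 threads per package */
        unsigned pkg_offset = 3;                 /* APIC ID bits for thread ID */
        uint32_t ecx = 0;

        if (nr_cores * nr_threads > 1) {
            ecx = (pkg_offset << 12) |           /* ECX[15:12] */
                  ((nr_cores * nr_threads) - 1); /* ECX[7:0] = NC */
        }
        printf("CPUID 0x80000008 ECX = 0x%08x\n", ecx);
        return 0;
    }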
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 408392dbf6..7d77efd9e4 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -356,6 +356,8 @@ typedef enum X86Seg {
#define MSR_IA32_ARCH_CAPABILITIES 0x10a
#define ARCH_CAP_TSX_CTRL_MSR (1<<7)
+#define MSR_IA32_PERF_CAPABILITIES 0x345
+
#define MSR_IA32_TSX_CTRL 0x122
#define MSR_IA32_TSCDEADLINE 0x6e0
@@ -529,6 +531,7 @@ typedef enum FeatureWord {
FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
FEAT_ARCH_CAPABILITIES,
FEAT_CORE_CAPABILITY,
+ FEAT_PERF_CAPABILITIES,
FEAT_VMX_PROCBASED_CTLS,
FEAT_VMX_SECONDARY_CTLS,
FEAT_VMX_PINBASED_CTLS,
@@ -772,6 +775,8 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2)
/* AVX512 Multiply Accumulation Single Precision */
#define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3)
+/* AVX512 Vector Pair Intersection to a Pair of Mask Registers */
+#define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8)
/* Speculation Control */
#define CPUID_7_0_EDX_SPEC_CTRL (1U << 26)
/* Single Thread Indirect Branch Predictors */
@@ -1361,6 +1366,11 @@ typedef struct CPUCaches {
CPUCacheInfo *l3_cache;
} CPUCaches;
+typedef struct HVFX86LazyFlags {
+ target_ulong result;
+ target_ulong auxbits;
+} HVFX86LazyFlags;
+
typedef struct CPUX86State {
/* standard registers */
target_ulong regs[CPU_NB_REGS];
@@ -1584,6 +1594,7 @@ typedef struct CPUX86State {
bool tsc_valid;
int64_t tsc_khz;
int64_t user_tsc_khz; /* for sanity check only */
+ uint64_t apic_bus_freq;
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
void *xsave_buf;
#endif
@@ -1591,7 +1602,8 @@ typedef struct CPUX86State {
struct kvm_nested_state *nested_state;
#endif
#if defined(CONFIG_HVF)
- HVFX86EmulatorState *hvf_emul;
+ HVFX86LazyFlags hvf_lflags;
+ void *hvf_mmio_buf;
#endif
uint64_t mcg_cap;
@@ -1633,6 +1645,7 @@ struct X86CPU {
CPUNegativeOffsetState neg;
CPUX86State env;
+ VMChangeStateEntry *vmsentry;
uint64_t ucode_rev;
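The HVFX86LazyFlags struct introduced above carries the last ALU result and
auxiliary carry bits so EFLAGS bits can be derived on demand rather than
recomputed after every instruction. A small model of how ZF and SF fall out
of a stored result (the standard lazy-flags trick; not code copied from the
emulator):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct LazyFlags {
        uint64_t result;   /* last ALU result */
        uint64_t auxbits;  /* carry/overflow helper bits, unused here */
    } LazyFlags;

    static bool get_ZF(const LazyFlags *lf) { return lf->result == 0; }
    static bool get_SF(const LazyFlags *lf) { return (int64_t)lf->result < 0; }

    int main(void)
    {
        LazyFlags lf = { .result = (uint64_t)(5 - 5) };
        printf("ZF=%d SF=%d\n", get_ZF(&lf), get_SF(&lf));
        return 0;
    }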
diff --git a/target/i386/fpu_helper.c b/target/i386/fpu_helper.c
index b34fa784eb..8ef5b463ea 100644
--- a/target/i386/fpu_helper.c
+++ b/target/i386/fpu_helper.c
@@ -59,8 +59,13 @@
#define FPUC_EM 0x3f
#define floatx80_lg2 make_floatx80(0x3ffd, 0x9a209a84fbcff799LL)
+#define floatx80_lg2_d make_floatx80(0x3ffd, 0x9a209a84fbcff798LL)
#define floatx80_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL)
+#define floatx80_l2e_d make_floatx80(0x3fff, 0xb8aa3b295c17f0bbLL)
#define floatx80_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL)
+#define floatx80_l2t_u make_floatx80(0x4000, 0xd49a784bcd1b8affLL)
+#define floatx80_ln2_d make_floatx80(0x3ffe, 0xb17217f7d1cf79abLL)
+#define floatx80_pi_d make_floatx80(0x4000, 0xc90fdaa22168c234LL)
#if !defined(CONFIG_USER_ONLY)
static qemu_irq ferr_irq;
@@ -156,12 +161,32 @@ static void fpu_set_exception(CPUX86State *env, int mask)
}
}
+static inline uint8_t save_exception_flags(CPUX86State *env)
+{
+ uint8_t old_flags = get_float_exception_flags(&env->fp_status);
+ set_float_exception_flags(0, &env->fp_status);
+ return old_flags;
+}
+
+static void merge_exception_flags(CPUX86State *env, uint8_t old_flags)
+{
+ uint8_t new_flags = get_float_exception_flags(&env->fp_status);
+ float_raise(old_flags, &env->fp_status);
+ fpu_set_exception(env,
+ ((new_flags & float_flag_invalid ? FPUS_IE : 0) |
+ (new_flags & float_flag_divbyzero ? FPUS_ZE : 0) |
+ (new_flags & float_flag_overflow ? FPUS_OE : 0) |
+ (new_flags & float_flag_underflow ? FPUS_UE : 0) |
+ (new_flags & float_flag_inexact ? FPUS_PE : 0) |
+ (new_flags & float_flag_input_denormal ? FPUS_DE : 0)));
+}
+
static inline floatx80 helper_fdiv(CPUX86State *env, floatx80 a, floatx80 b)
{
- if (floatx80_is_zero(b)) {
- fpu_set_exception(env, FPUS_ZE);
- }
- return floatx80_div(a, b, &env->fp_status);
+ uint8_t old_flags = save_exception_flags(env);
+ floatx80 ret = floatx80_div(a, b, &env->fp_status);
+ merge_exception_flags(env, old_flags);
+ return ret;
}
static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr)
@@ -178,6 +203,7 @@ static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr)
void helper_flds_FT0(CPUX86State *env, uint32_t val)
{
+ uint8_t old_flags = save_exception_flags(env);
union {
float32 f;
uint32_t i;
@@ -185,10 +211,12 @@ void helper_flds_FT0(CPUX86State *env, uint32_t val)
u.i = val;
FT0 = float32_to_floatx80(u.f, &env->fp_status);
+ merge_exception_flags(env, old_flags);
}
void helper_fldl_FT0(CPUX86State *env, uint64_t val)
{
+ uint8_t old_flags = save_exception_flags(env);
union {
float64 f;
uint64_t i;
@@ -196,6 +224,7 @@ void helper_fldl_FT0(CPUX86State *env, uint64_t val)
u.i = val;
FT0 = float64_to_floatx80(u.f, &env->fp_status);
+ merge_exception_flags(env, old_flags);
}
void helper_fildl_FT0(CPUX86State *env, int32_t val)
@@ -205,6 +234,7 @@ void helper_fildl_FT0(CPUX86State *env, int32_t val)
void helper_flds_ST0(CPUX86State *env, uint32_t val)
{
+ uint8_t old_flags = save_exception_flags(env);
int new_fpstt;
union {
float32 f;
@@ -216,10 +246,12 @@ void helper_flds_ST0(CPUX86State *env, uint32_t val)
env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
+ merge_exception_flags(env, old_flags);
}
void helper_fldl_ST0(CPUX86State *env, uint64_t val)
{
+ uint8_t old_flags = save_exception_flags(env);
int new_fpstt;
union {
float64 f;
@@ -231,6 +263,7 @@ void helper_fldl_ST0(CPUX86State *env, uint64_t val)
env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
+ merge_exception_flags(env, old_flags);
}
void helper_fildl_ST0(CPUX86State *env, int32_t val)
@@ -255,90 +288,108 @@ void helper_fildll_ST0(CPUX86State *env, int64_t val)
uint32_t helper_fsts_ST0(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
union {
float32 f;
uint32_t i;
} u;
u.f = floatx80_to_float32(ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
return u.i;
}
uint64_t helper_fstl_ST0(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
union {
float64 f;
uint64_t i;
} u;
u.f = floatx80_to_float64(ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
return u.i;
}
int32_t helper_fist_ST0(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
int32_t val;
val = floatx80_to_int32(ST0, &env->fp_status);
if (val != (int16_t)val) {
+ set_float_exception_flags(float_flag_invalid, &env->fp_status);
val = -32768;
}
+ merge_exception_flags(env, old_flags);
return val;
}
int32_t helper_fistl_ST0(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
int32_t val;
- signed char old_exp_flags;
-
- old_exp_flags = get_float_exception_flags(&env->fp_status);
- set_float_exception_flags(0, &env->fp_status);
val = floatx80_to_int32(ST0, &env->fp_status);
if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
val = 0x80000000;
}
- set_float_exception_flags(get_float_exception_flags(&env->fp_status)
- | old_exp_flags, &env->fp_status);
+ merge_exception_flags(env, old_flags);
return val;
}
int64_t helper_fistll_ST0(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
int64_t val;
- signed char old_exp_flags;
-
- old_exp_flags = get_float_exception_flags(&env->fp_status);
- set_float_exception_flags(0, &env->fp_status);
val = floatx80_to_int64(ST0, &env->fp_status);
if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
val = 0x8000000000000000ULL;
}
- set_float_exception_flags(get_float_exception_flags(&env->fp_status)
- | old_exp_flags, &env->fp_status);
+ merge_exception_flags(env, old_flags);
return val;
}
int32_t helper_fistt_ST0(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
int32_t val;
val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
if (val != (int16_t)val) {
+ set_float_exception_flags(float_flag_invalid, &env->fp_status);
val = -32768;
}
+ merge_exception_flags(env, old_flags);
return val;
}
int32_t helper_fisttl_ST0(CPUX86State *env)
{
- return floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
+ uint8_t old_flags = save_exception_flags(env);
+ int32_t val;
+
+ val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
+ if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
+ val = 0x80000000;
+ }
+ merge_exception_flags(env, old_flags);
+ return val;
}
int64_t helper_fisttll_ST0(CPUX86State *env)
{
- return floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
+ uint8_t old_flags = save_exception_flags(env);
+ int64_t val;
+
+ val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
+ if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
+ val = 0x8000000000000000ULL;
+ }
+ merge_exception_flags(env, old_flags);
+ return val;
}
void helper_fldt_ST0(CPUX86State *env, target_ulong ptr)
@@ -420,24 +471,29 @@ static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
void helper_fcom_ST0_FT0(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
FloatRelation ret;
ret = floatx80_compare(ST0, FT0, &env->fp_status);
env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
+ merge_exception_flags(env, old_flags);
}
void helper_fucom_ST0_FT0(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
FloatRelation ret;
ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
+ merge_exception_flags(env, old_flags);
}
static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
void helper_fcomi_ST0_FT0(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
int eflags;
FloatRelation ret;
@@ -445,10 +501,12 @@ void helper_fcomi_ST0_FT0(CPUX86State *env)
eflags = cpu_cc_compute_all(env, CC_OP);
eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
CC_SRC = eflags;
+ merge_exception_flags(env, old_flags);
}
void helper_fucomi_ST0_FT0(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
int eflags;
FloatRelation ret;
@@ -456,26 +514,35 @@ void helper_fucomi_ST0_FT0(CPUX86State *env)
eflags = cpu_cc_compute_all(env, CC_OP);
eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
CC_SRC = eflags;
+ merge_exception_flags(env, old_flags);
}
void helper_fadd_ST0_FT0(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
ST0 = floatx80_add(ST0, FT0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
}
void helper_fmul_ST0_FT0(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
}
void helper_fsub_ST0_FT0(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
}
void helper_fsubr_ST0_FT0(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
}
void helper_fdiv_ST0_FT0(CPUX86State *env)
@@ -492,22 +559,30 @@ void helper_fdivr_ST0_FT0(CPUX86State *env)
void helper_fadd_STN_ST0(CPUX86State *env, int st_index)
{
+ uint8_t old_flags = save_exception_flags(env);
ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
}
void helper_fmul_STN_ST0(CPUX86State *env, int st_index)
{
+ uint8_t old_flags = save_exception_flags(env);
ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
}
void helper_fsub_STN_ST0(CPUX86State *env, int st_index)
{
+ uint8_t old_flags = save_exception_flags(env);
ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
}
void helper_fsubr_STN_ST0(CPUX86State *env, int st_index)
{
+ uint8_t old_flags = save_exception_flags(env);
ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
+ merge_exception_flags(env, old_flags);
}
void helper_fdiv_STN_ST0(CPUX86State *env, int st_index)
@@ -544,27 +619,66 @@ void helper_fld1_ST0(CPUX86State *env)
void helper_fldl2t_ST0(CPUX86State *env)
{
- ST0 = floatx80_l2t;
+ switch (env->fpuc & FPU_RC_MASK) {
+ case FPU_RC_UP:
+ ST0 = floatx80_l2t_u;
+ break;
+ default:
+ ST0 = floatx80_l2t;
+ break;
+ }
}
void helper_fldl2e_ST0(CPUX86State *env)
{
- ST0 = floatx80_l2e;
+ switch (env->fpuc & FPU_RC_MASK) {
+ case FPU_RC_DOWN:
+ case FPU_RC_CHOP:
+ ST0 = floatx80_l2e_d;
+ break;
+ default:
+ ST0 = floatx80_l2e;
+ break;
+ }
}
void helper_fldpi_ST0(CPUX86State *env)
{
- ST0 = floatx80_pi;
+ switch (env->fpuc & FPU_RC_MASK) {
+ case FPU_RC_DOWN:
+ case FPU_RC_CHOP:
+ ST0 = floatx80_pi_d;
+ break;
+ default:
+ ST0 = floatx80_pi;
+ break;
+ }
}
void helper_fldlg2_ST0(CPUX86State *env)
{
- ST0 = floatx80_lg2;
+ switch (env->fpuc & FPU_RC_MASK) {
+ case FPU_RC_DOWN:
+ case FPU_RC_CHOP:
+ ST0 = floatx80_lg2_d;
+ break;
+ default:
+ ST0 = floatx80_lg2;
+ break;
+ }
}
void helper_fldln2_ST0(CPUX86State *env)
{
- ST0 = floatx80_ln2;
+ switch (env->fpuc & FPU_RC_MASK) {
+ case FPU_RC_DOWN:
+ case FPU_RC_CHOP:
+ ST0 = floatx80_ln2_d;
+ break;
+ default:
+ ST0 = floatx80_ln2;
+ break;
+ }
}
void helper_fldz_ST0(CPUX86State *env)
@@ -679,14 +793,29 @@ void helper_fbld_ST0(CPUX86State *env, target_ulong ptr)
void helper_fbst_ST0(CPUX86State *env, target_ulong ptr)
{
+ uint8_t old_flags = save_exception_flags(env);
int v;
target_ulong mem_ref, mem_end;
int64_t val;
+ CPU_LDoubleU temp;
+
+ temp.d = ST0;
val = floatx80_to_int64(ST0, &env->fp_status);
mem_ref = ptr;
+ if (val >= 1000000000000000000LL || val <= -1000000000000000000LL) {
+ set_float_exception_flags(float_flag_invalid, &env->fp_status);
+ while (mem_ref < ptr + 7) {
+ cpu_stb_data_ra(env, mem_ref++, 0, GETPC());
+ }
+ cpu_stb_data_ra(env, mem_ref++, 0xc0, GETPC());
+ cpu_stb_data_ra(env, mem_ref++, 0xff, GETPC());
+ cpu_stb_data_ra(env, mem_ref++, 0xff, GETPC());
+ merge_exception_flags(env, old_flags);
+ return;
+ }
mem_end = mem_ref + 9;
- if (val < 0) {
+ if (SIGND(temp)) {
cpu_stb_data_ra(env, mem_end, 0x80, GETPC());
val = -val;
} else {
@@ -704,6 +833,7 @@ void helper_fbst_ST0(CPUX86State *env, target_ulong ptr)
while (mem_ref < mem_end) {
cpu_stb_data_ra(env, mem_ref++, 0, GETPC());
}
+ merge_exception_flags(env, old_flags);
}
void helper_f2xm1(CPUX86State *env)
@@ -757,6 +887,7 @@ void helper_fpatan(CPUX86State *env)
void helper_fxtract(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
CPU_LDoubleU temp;
temp.d = ST0;
@@ -767,16 +898,40 @@ void helper_fxtract(CPUX86State *env)
&env->fp_status);
fpush(env);
ST0 = temp.d;
+ } else if (floatx80_invalid_encoding(ST0)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST0 = floatx80_default_nan(&env->fp_status);
+ fpush(env);
+ ST0 = ST1;
+ } else if (floatx80_is_any_nan(ST0)) {
+ if (floatx80_is_signaling_nan(ST0, &env->fp_status)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST0 = floatx80_silence_nan(ST0, &env->fp_status);
+ }
+ fpush(env);
+ ST0 = ST1;
+ } else if (floatx80_is_infinity(ST0)) {
+ fpush(env);
+ ST0 = ST1;
+ ST1 = floatx80_infinity;
} else {
int expdif;
- expdif = EXPD(temp) - EXPBIAS;
+ if (EXPD(temp) == 0) {
+ int shift = clz64(temp.l.lower);
+ temp.l.lower <<= shift;
+ expdif = 1 - EXPBIAS - shift;
+ float_raise(float_flag_input_denormal, &env->fp_status);
+ } else {
+ expdif = EXPD(temp) - EXPBIAS;
+ }
/* DP exponent bias */
ST0 = int32_to_floatx80(expdif, &env->fp_status);
fpush(env);
BIASEXPONENT(temp);
ST0 = temp.d;
}
+ merge_exception_flags(env, old_flags);
}
void helper_fprem1(CPUX86State *env)
@@ -916,11 +1071,13 @@ void helper_fyl2xp1(CPUX86State *env)
void helper_fsqrt(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
if (floatx80_is_neg(ST0)) {
env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
env->fpus |= 0x400;
}
ST0 = floatx80_sqrt(ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
}
void helper_fsincos(CPUX86State *env)
@@ -940,17 +1097,60 @@ void helper_fsincos(CPUX86State *env)
void helper_frndint(CPUX86State *env)
{
+ uint8_t old_flags = save_exception_flags(env);
ST0 = floatx80_round_to_int(ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
}
void helper_fscale(CPUX86State *env)
{
- if (floatx80_is_any_nan(ST1)) {
+ uint8_t old_flags = save_exception_flags(env);
+ if (floatx80_invalid_encoding(ST1) || floatx80_invalid_encoding(ST0)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST0 = floatx80_default_nan(&env->fp_status);
+ } else if (floatx80_is_any_nan(ST1)) {
+ if (floatx80_is_signaling_nan(ST0, &env->fp_status)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ }
ST0 = ST1;
+ if (floatx80_is_signaling_nan(ST0, &env->fp_status)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST0 = floatx80_silence_nan(ST0, &env->fp_status);
+ }
+ } else if (floatx80_is_infinity(ST1) &&
+ !floatx80_invalid_encoding(ST0) &&
+ !floatx80_is_any_nan(ST0)) {
+ if (floatx80_is_neg(ST1)) {
+ if (floatx80_is_infinity(ST0)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST0 = floatx80_default_nan(&env->fp_status);
+ } else {
+ ST0 = (floatx80_is_neg(ST0) ?
+ floatx80_chs(floatx80_zero) :
+ floatx80_zero);
+ }
+ } else {
+ if (floatx80_is_zero(ST0)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST0 = floatx80_default_nan(&env->fp_status);
+ } else {
+ ST0 = (floatx80_is_neg(ST0) ?
+ floatx80_chs(floatx80_infinity) :
+ floatx80_infinity);
+ }
+ }
} else {
- int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
+ int n;
+ signed char save = env->fp_status.floatx80_rounding_precision;
+ uint8_t save_flags = get_float_exception_flags(&env->fp_status);
+ set_float_exception_flags(0, &env->fp_status);
+ n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
+ set_float_exception_flags(save_flags, &env->fp_status);
+ env->fp_status.floatx80_rounding_precision = 80;
ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
+ env->fp_status.floatx80_rounding_precision = save;
}
+ merge_exception_flags(env, old_flags);
}
void helper_fsin(CPUX86State *env)
@@ -1000,7 +1200,7 @@ void helper_fxam_ST0(CPUX86State *env)
if (expdif == MAXEXPD) {
if (MANTD(temp) == 0x8000000000000000ULL) {
env->fpus |= 0x500; /* Infinity */
- } else {
+ } else if (MANTD(temp) & 0x8000000000000000ULL) {
env->fpus |= 0x100; /* NaN */
}
} else if (expdif == 0) {
@@ -1009,7 +1209,7 @@ void helper_fxam_ST0(CPUX86State *env)
} else {
env->fpus |= 0x4400; /* Denormal */
}
- } else {
+ } else if (MANTD(temp) & 0x8000000000000000ULL) {
env->fpus |= 0x400;
}
}
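Nearly every fpu_helper.c hunk above follows one discipline: clear the
softfloat exception flags on entry, run the operation, then fold the newly
raised flags into the x87 status word while re-raising whatever was pending
before. A standalone model of that save/merge pattern (the flag variable
stands in for env->fp_status; the values are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t flags;   /* stands in for env->fp_status's flags */

    static uint8_t save_flags(void)
    {
        uint8_t old = flags;
        flags = 0;          /* so the operation's flags are seen alone */
        return old;
    }

    static void merge_flags(uint8_t old)
    {
        printf("raised by this op: 0x%02x\n", flags);
        flags |= old;       /* restore what was pending before */
    }

    int main(void)
    {
        flags = 0x01;                 /* something already pending */
        uint8_t old = save_flags();
        flags |= 0x20;                /* the "operation" raises a flag */
        merge_flags(old);
        printf("accumulated: 0x%02x\n", flags);
        return 0;
    }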
diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c
index f9c83fff25..c93bb23a44 100644
--- a/target/i386/hax-all.c
+++ b/target/i386/hax-all.c
@@ -232,10 +232,10 @@ int hax_init_vcpu(CPUState *cpu)
return ret;
}
-struct hax_vm *hax_vm_create(struct hax_state *hax)
+struct hax_vm *hax_vm_create(struct hax_state *hax, int max_cpus)
{
struct hax_vm *vm;
- int vm_id = 0, ret;
+ int vm_id = 0, ret, i;
if (hax_invalid_fd(hax->fd)) {
return NULL;
@@ -245,6 +245,11 @@ struct hax_vm *hax_vm_create(struct hax_state *hax)
return hax->vm;
}
+ if (max_cpus > HAX_MAX_VCPU) {
+ fprintf(stderr, "Maximum VCPU number QEMU supported is %d\n", HAX_MAX_VCPU);
+ return NULL;
+ }
+
vm = g_new0(struct hax_vm, 1);
ret = hax_host_create_vm(hax, &vm_id);
@@ -259,6 +264,12 @@ struct hax_vm *hax_vm_create(struct hax_state *hax)
goto error;
}
+ vm->numvcpus = max_cpus;
+ vm->vcpus = g_new0(struct hax_vcpu_state *, vm->numvcpus);
+ for (i = 0; i < vm->numvcpus; i++) {
+ vm->vcpus[i] = NULL;
+ }
+
hax->vm = vm;
return vm;
@@ -272,12 +283,14 @@ int hax_vm_destroy(struct hax_vm *vm)
{
int i;
- for (i = 0; i < HAX_MAX_VCPU; i++)
+ for (i = 0; i < vm->numvcpus; i++)
if (vm->vcpus[i]) {
fprintf(stderr, "VCPUs should be cleaned up before the VM is destroyed\n");
return -1;
}
hax_close_fd(vm->fd);
+ vm->numvcpus = 0;
+ g_free(vm->vcpus);
g_free(vm);
hax_global.vm = NULL;
return 0;
@@ -292,7 +305,7 @@ static void hax_handle_interrupt(CPUState *cpu, int mask)
}
}
-static int hax_init(ram_addr_t ram_size)
+static int hax_init(ram_addr_t ram_size, int max_cpus)
{
struct hax_state *hax = NULL;
struct hax_qemu_version qversion;
@@ -324,7 +337,7 @@ static int hax_init(ram_addr_t ram_size)
goto error;
}
- hax->vm = hax_vm_create(hax);
+ hax->vm = hax_vm_create(hax, max_cpus);
if (!hax->vm) {
fprintf(stderr, "Failed to create HAX VM\n");
ret = -EINVAL;
@@ -352,7 +365,7 @@ static int hax_init(ram_addr_t ram_size)
static int hax_accel_init(MachineState *ms)
{
- int ret = hax_init(ms->ram_size);
+ int ret = hax_init(ms->ram_size, (int)ms->smp.max_cpus);
if (ret && (ret != -ENOSPC)) {
fprintf(stderr, "No accelerator found.\n");
diff --git a/target/i386/hax-i386.h b/target/i386/hax-i386.h
index 54e9d8b057..ec28708185 100644
--- a/target/i386/hax-i386.h
+++ b/target/i386/hax-i386.h
@@ -41,13 +41,12 @@ struct hax_state {
};
#define HAX_MAX_VCPU 0x10
-#define MAX_VM_ID 0x40
-#define MAX_VCPU_ID 0x40
struct hax_vm {
hax_fd fd;
int id;
- struct hax_vcpu_state *vcpus[HAX_MAX_VCPU];
+ int numvcpus;
+ struct hax_vcpu_state **vcpus;
};
#ifdef NEED_CPU_H
@@ -58,7 +57,7 @@ int valid_hax_tunnel_size(uint16_t size);
/* Host specific functions */
int hax_mod_version(struct hax_state *hax, struct hax_module_version *version);
int hax_inject_interrupt(CPUArchState *env, int vector);
-struct hax_vm *hax_vm_create(struct hax_state *hax);
+struct hax_vm *hax_vm_create(struct hax_state *hax, int max_cpus);
int hax_vcpu_run(struct hax_vcpu_state *vcpu);
int hax_vcpu_create(int id);
int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state,
diff --git a/target/i386/hvf/hvf-i386.h b/target/i386/hvf/hvf-i386.h
index 15ee4835cf..ef20c73eca 100644
--- a/target/i386/hvf/hvf-i386.h
+++ b/target/i386/hvf/hvf-i386.h
@@ -16,13 +16,12 @@
#ifndef HVF_I386_H
#define HVF_I386_H
+#include "sysemu/accel.h"
#include "sysemu/hvf.h"
#include "cpu.h"
#include "x86.h"
#define HVF_MAX_VCPU 0x10
-#define MAX_VM_ID 0x40
-#define MAX_VCPU_ID 0x40
extern struct hvf_state hvf_global;
@@ -37,6 +36,40 @@ struct hvf_state {
uint64_t mem_quota;
};
+/* hvf_slot flags */
+#define HVF_SLOT_LOG (1 << 0)
+
+typedef struct hvf_slot {
+ uint64_t start;
+ uint64_t size;
+ uint8_t *mem;
+ int slot_id;
+ uint32_t flags;
+ MemoryRegion *region;
+} hvf_slot;
+
+typedef struct hvf_vcpu_caps {
+ uint64_t vmx_cap_pinbased;
+ uint64_t vmx_cap_procbased;
+ uint64_t vmx_cap_procbased2;
+ uint64_t vmx_cap_entry;
+ uint64_t vmx_cap_exit;
+ uint64_t vmx_cap_preemption_timer;
+} hvf_vcpu_caps;
+
+typedef struct HVFState {
+ AccelState parent;
+ hvf_slot slots[32];
+ int num_slots;
+
+ hvf_vcpu_caps *hvf_caps;
+} HVFState;
+extern HVFState *hvf_state;
+
+void hvf_set_phys_mem(MemoryRegionSection *, bool);
+void hvf_handle_io(CPUArchState *, uint16_t, void *, int, int, int);
+hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
+
#ifdef NEED_CPU_H
/* Functions exported to host specific mode */
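hvf_find_overlap_slot() is only declared here; its implementation lives
elsewhere in the HVF code. Assuming it scans the fixed slots array for a
range intersection (an assumption about its shape, not the real body), a
plausible sketch looks like:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct hvf_slot {
        uint64_t start;
        uint64_t size;
    } hvf_slot;

    #define NUM_SLOTS 32
    static hvf_slot slots[NUM_SLOTS];

    /* Return the first slot whose [start, start + size) range intersects
     * [start, end); NULL if none does. Unused slots have size == 0. */
    static hvf_slot *find_overlap_slot(uint64_t start, uint64_t end)
    {
        for (int i = 0; i < NUM_SLOTS; i++) {
            hvf_slot *s = &slots[i];
            if (s->size && s->start < end && start < s->start + s->size) {
                return s;
            }
        }
        return NULL;
    }

    int main(void)
    {
        slots[0] = (hvf_slot){ .start = 0x1000, .size = 0x1000 };
        return find_overlap_slot(0x1800, 0x2000) ? 0 : 1;
    }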
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index d72543dc31..be016b951a 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -251,7 +251,7 @@ void vmx_update_tpr(CPUState *cpu)
}
}
-void update_apic_tpr(CPUState *cpu)
+static void update_apic_tpr(CPUState *cpu)
{
X86CPU *x86_cpu = X86_CPU(cpu);
int tpr = rreg(cpu->hvf_fd, HV_X86_TPR) >> 4;
@@ -312,7 +312,8 @@ void hvf_cpu_synchronize_post_reset(CPUState *cpu_state)
run_on_cpu(cpu_state, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}
-void _hvf_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
+static void do_hvf_cpu_synchronize_post_init(CPUState *cpu,
+ run_on_cpu_data arg)
{
CPUState *cpu_state = cpu;
hvf_put_registers(cpu_state);
@@ -321,7 +322,7 @@ void _hvf_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
void hvf_cpu_synchronize_post_init(CPUState *cpu_state)
{
- run_on_cpu(cpu_state, _hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
+ run_on_cpu(cpu_state, do_hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}
static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
@@ -532,7 +533,11 @@ void hvf_reset_vcpu(CPUState *cpu) {
void hvf_vcpu_destroy(CPUState *cpu)
{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
hv_return_t ret = hv_vcpu_destroy((hv_vcpuid_t)cpu->hvf_fd);
+ g_free(env->hvf_mmio_buf);
assert_hvf_ok(ret);
}
@@ -562,7 +567,7 @@ int hvf_init_vcpu(CPUState *cpu)
init_decoder();
hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
- env->hvf_emul = g_new0(HVFX86EmulatorState, 1);
+ env->hvf_mmio_buf = g_new(char, 4096);
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
cpu->vcpu_dirty = 1;
@@ -722,8 +727,7 @@ int hvf_vcpu_exec(CPUState *cpu)
hvf_store_events(cpu, ins_len, idtvec_info);
rip = rreg(cpu->hvf_fd, HV_X86_RIP);
- RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
- env->eflags = RFLAGS(env);
+ env->eflags = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
qemu_mutex_lock_iothread();
@@ -735,7 +739,7 @@ int hvf_vcpu_exec(CPUState *cpu)
case EXIT_REASON_HLT: {
macvm_set_rip(cpu, rip + ins_len);
if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
- (EFLAGS(env) & IF_MASK))
+ (env->eflags & IF_MASK))
&& !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
!(idtvec_info & VMCS_IDT_VEC_VALID)) {
cpu->halted = 1;
@@ -766,8 +770,6 @@ int hvf_vcpu_exec(CPUState *cpu)
struct x86_decode decode;
load_regs(cpu);
- env->hvf_emul->fetch_rip = rip;
-
decode_instruction(env, &decode);
exec_instruction(env, &decode);
store_regs(cpu);
@@ -796,7 +798,7 @@ int hvf_vcpu_exec(CPUState *cpu)
} else {
RAX(env) = (uint64_t)val;
}
- RIP(env) += ins_len;
+ env->eip += ins_len;
store_regs(cpu);
break;
} else if (!string && !in) {
@@ -808,8 +810,6 @@ int hvf_vcpu_exec(CPUState *cpu)
struct x86_decode decode;
load_regs(cpu);
- env->hvf_emul->fetch_rip = rip;
-
decode_instruction(env, &decode);
assert(ins_len == decode.len);
exec_instruction(env, &decode);
@@ -870,7 +870,7 @@ int hvf_vcpu_exec(CPUState *cpu)
} else {
simulate_wrmsr(cpu);
}
- RIP(env) += rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
+ env->eip += ins_len;
store_regs(cpu);
break;
}
@@ -906,7 +906,7 @@ int hvf_vcpu_exec(CPUState *cpu)
error_report("Unrecognized CR %d", cr);
abort();
}
- RIP(env) += ins_len;
+ env->eip += ins_len;
store_regs(cpu);
break;
}
@@ -914,8 +914,6 @@ int hvf_vcpu_exec(CPUState *cpu)
struct x86_decode decode;
load_regs(cpu);
- env->hvf_emul->fetch_rip = rip;
-
decode_instruction(env, &decode);
exec_instruction(env, &decode);
store_regs(cpu);
diff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c
index 3afcedc7fc..fdb11c8db9 100644
--- a/target/i386/hvf/x86.c
+++ b/target/i386/hvf/x86.c
@@ -131,7 +131,7 @@ bool x86_is_v8086(struct CPUState *cpu)
{
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
- return x86_is_protected(cpu) && (RFLAGS(env) & RFLAGS_VM);
+ return x86_is_protected(cpu) && (env->eflags & VM_MASK);
}
bool x86_is_long_mode(struct CPUState *cpu)
diff --git a/target/i386/hvf/x86.h b/target/i386/hvf/x86.h
index c95d5b2116..bacade7b65 100644
--- a/target/i386/hvf/x86.h
+++ b/target/i386/hvf/x86.h
@@ -42,64 +42,6 @@ typedef struct x86_register {
};
} __attribute__ ((__packed__)) x86_register;
-typedef enum x86_rflags {
- RFLAGS_CF = (1L << 0),
- RFLAGS_PF = (1L << 2),
- RFLAGS_AF = (1L << 4),
- RFLAGS_ZF = (1L << 6),
- RFLAGS_SF = (1L << 7),
- RFLAGS_TF = (1L << 8),
- RFLAGS_IF = (1L << 9),
- RFLAGS_DF = (1L << 10),
- RFLAGS_OF = (1L << 11),
- RFLAGS_IOPL = (3L << 12),
- RFLAGS_NT = (1L << 14),
- RFLAGS_RF = (1L << 16),
- RFLAGS_VM = (1L << 17),
- RFLAGS_AC = (1L << 18),
- RFLAGS_VIF = (1L << 19),
- RFLAGS_VIP = (1L << 20),
- RFLAGS_ID = (1L << 21),
-} x86_rflags;
-
-/* rflags register */
-typedef struct x86_reg_flags {
- union {
- struct {
- uint64_t rflags;
- };
- struct {
- uint32_t eflags;
- uint32_t hi32_unused1;
- };
- struct {
- uint32_t cf:1;
- uint32_t unused1:1;
- uint32_t pf:1;
- uint32_t unused2:1;
- uint32_t af:1;
- uint32_t unused3:1;
- uint32_t zf:1;
- uint32_t sf:1;
- uint32_t tf:1;
- uint32_t ief:1;
- uint32_t df:1;
- uint32_t of:1;
- uint32_t iopl:2;
- uint32_t nt:1;
- uint32_t unused4:1;
- uint32_t rf:1;
- uint32_t vm:1;
- uint32_t ac:1;
- uint32_t vif:1;
- uint32_t vip:1;
- uint32_t id:1;
- uint32_t unused5:10;
- uint32_t hi32_unused2;
- };
- };
-} __attribute__ ((__packed__)) x86_reg_flags;
-
typedef enum x86_reg_cr0 {
CR0_PE = (1L << 0),
CR0_MP = (1L << 1),
@@ -286,29 +228,10 @@ typedef struct x68_segment_selector {
};
} __attribute__ ((__packed__)) x68_segment_selector;
-typedef struct lazy_flags {
- target_ulong result;
- target_ulong auxbits;
-} lazy_flags;
-
-/* Definition of hvf_x86_state is here */
-struct HVFX86EmulatorState {
- int interruptable;
- uint64_t fetch_rip;
- uint64_t rip;
- struct x86_register regs[16];
- struct x86_reg_flags rflags;
- struct lazy_flags lflags;
- uint8_t mmio_buf[4096];
-};
-
/* useful register access macros */
-#define RIP(cpu) (cpu->hvf_emul->rip)
-#define EIP(cpu) ((uint32_t)cpu->hvf_emul->rip)
-#define RFLAGS(cpu) (cpu->hvf_emul->rflags.rflags)
-#define EFLAGS(cpu) (cpu->hvf_emul->rflags.eflags)
+#define x86_reg(cpu, reg) ((x86_register *) &cpu->regs[reg])
-#define RRX(cpu, reg) (cpu->hvf_emul->regs[reg].rrx)
+#define RRX(cpu, reg) (x86_reg(cpu, reg)->rrx)
#define RAX(cpu) RRX(cpu, R_EAX)
#define RCX(cpu) RRX(cpu, R_ECX)
#define RDX(cpu) RRX(cpu, R_EDX)
@@ -326,7 +249,7 @@ struct HVFX86EmulatorState {
#define R14(cpu) RRX(cpu, R_R14)
#define R15(cpu) RRX(cpu, R_R15)
-#define ERX(cpu, reg) (cpu->hvf_emul->regs[reg].erx)
+#define ERX(cpu, reg) (x86_reg(cpu, reg)->erx)
#define EAX(cpu) ERX(cpu, R_EAX)
#define ECX(cpu) ERX(cpu, R_ECX)
#define EDX(cpu) ERX(cpu, R_EDX)
@@ -336,7 +259,7 @@ struct HVFX86EmulatorState {
#define ESI(cpu) ERX(cpu, R_ESI)
#define EDI(cpu) ERX(cpu, R_EDI)
-#define RX(cpu, reg) (cpu->hvf_emul->regs[reg].rx)
+#define RX(cpu, reg) (x86_reg(cpu, reg)->rx)
#define AX(cpu) RX(cpu, R_EAX)
#define CX(cpu) RX(cpu, R_ECX)
#define DX(cpu) RX(cpu, R_EDX)
@@ -346,13 +269,13 @@ struct HVFX86EmulatorState {
#define SI(cpu) RX(cpu, R_ESI)
#define DI(cpu) RX(cpu, R_EDI)
-#define RL(cpu, reg) (cpu->hvf_emul->regs[reg].lx)
+#define RL(cpu, reg) (x86_reg(cpu, reg)->lx)
#define AL(cpu) RL(cpu, R_EAX)
#define CL(cpu) RL(cpu, R_ECX)
#define DL(cpu) RL(cpu, R_EDX)
#define BL(cpu) RL(cpu, R_EBX)
-#define RH(cpu, reg) (cpu->hvf_emul->regs[reg].hx)
+#define RH(cpu, reg) (x86_reg(cpu, reg)->hx)
#define AH(cpu) RH(cpu, R_EAX)
#define CH(cpu) RH(cpu, R_ECX)
#define DH(cpu) RH(cpu, R_EDX)
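The x86_reg() macro above reinterprets a CPUX86State GPR slot as the packed
x86_register union, so the RRX/ERX/RX/RL/RH accessors keep working against
env->regs[] directly. A cut-down model of that union layout (little-endian
hosts assumed, matching the real definition's packing):

    #include <stdint.h>
    #include <stdio.h>

    typedef union {
        uint64_t rrx;       /* full 64-bit register */
        uint32_t erx;       /* low 32 bits */
        uint16_t rx;        /* low 16 bits */
        struct {
            uint8_t lx;     /* low byte  (AL, CL, ...) */
            uint8_t hx;     /* next byte (AH, CH, ...) */
        };
    } x86_register;

    int main(void)
    {
        uint64_t regs[16] = { 0x1122334455667788ULL };
        x86_register *rax = (x86_register *)&regs[0];
        printf("rrx=%016llx erx=%08x rx=%04x lx=%02x hx=%02x\n",
               (unsigned long long)rax->rrx, rax->erx, rax->rx,
               rax->lx, rax->hx);
        return 0;
    }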
diff --git a/target/i386/hvf/x86_decode.c b/target/i386/hvf/x86_decode.c
index 77c346605f..34c5e3006c 100644
--- a/target/i386/hvf/x86_decode.c
+++ b/target/i386/hvf/x86_decode.c
@@ -29,8 +29,7 @@
static void decode_invalid(CPUX86State *env, struct x86_decode *decode)
{
- printf("%llx: failed to decode instruction ", env->hvf_emul->fetch_rip -
- decode->len);
+ printf("%llx: failed to decode instruction ", env->eip);
for (int i = 0; i < decode->opcode_len; i++) {
printf("%x ", decode->opcode[i]);
}
@@ -75,7 +74,7 @@ static inline uint64_t decode_bytes(CPUX86State *env, struct x86_decode *decode,
VM_PANIC_EX("%s invalid size %d\n", __func__, size);
break;
}
- target_ulong va = linear_rip(env_cpu(env), RIP(env)) + decode->len;
+ target_ulong va = linear_rip(env_cpu(env), env->eip) + decode->len;
vmx_read_mem(env_cpu(env), &val, va, size);
decode->len += size;
@@ -698,15 +697,13 @@ static void decode_db_4(CPUX86State *env, struct x86_decode *decode)
#define RFLAGS_MASK_NONE 0
-#define RFLAGS_MASK_OSZAPC (RFLAGS_OF | RFLAGS_SF | RFLAGS_ZF | RFLAGS_AF | \
- RFLAGS_PF | RFLAGS_CF)
-#define RFLAGS_MASK_LAHF (RFLAGS_SF | RFLAGS_ZF | RFLAGS_AF | RFLAGS_PF | \
- RFLAGS_CF)
-#define RFLAGS_MASK_CF (RFLAGS_CF)
-#define RFLAGS_MASK_IF (RFLAGS_IF)
-#define RFLAGS_MASK_TF (RFLAGS_TF)
-#define RFLAGS_MASK_DF (RFLAGS_DF)
-#define RFLAGS_MASK_ZF (RFLAGS_ZF)
+#define RFLAGS_MASK_OSZAPC (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C)
+#define RFLAGS_MASK_LAHF (CC_S | CC_Z | CC_A | CC_P | CC_C)
+#define RFLAGS_MASK_CF (CC_C)
+#define RFLAGS_MASK_IF (IF_MASK)
+#define RFLAGS_MASK_TF (TF_MASK)
+#define RFLAGS_MASK_DF (DF_MASK)
+#define RFLAGS_MASK_ZF (CC_Z)
struct decode_tbl _1op_inst[] = {
{0x0, X86_DECODE_CMD_ADD, 1, true, decode_modrm_rm, decode_modrm_reg, NULL,
@@ -1771,7 +1768,7 @@ void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,
ptr += get_sib_val(env, decode, &seg);
} else if (!decode->modrm.mod && 5 == decode->modrm.rm) {
if (x86_is_long_mode(env_cpu(env))) {
- ptr += RIP(env) + decode->len;
+ ptr += env->eip + decode->len;
} else {
ptr = decode->displacement;
}
@@ -1807,7 +1804,7 @@ void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,
if (4 == rm) {
ptr = get_sib_val(env, decode, &seg) + offset;
} else if (0 == mod && 5 == rm) {
- ptr = RIP(env) + decode->len + (int32_t) offset;
+ ptr = env->eip + decode->len + (int32_t) offset;
} else {
ptr = get_reg_val(env, src, decode->rex.rex, decode->rex.b, 8) +
(int64_t) offset;
diff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c
index 92ab815f5d..d3e289ed87 100644
--- a/target/i386/hvf/x86_emu.c
+++ b/target/i386/hvf/x86_emu.c
@@ -95,13 +95,13 @@ target_ulong read_reg(CPUX86State *env, int reg, int size)
{
switch (size) {
case 1:
- return env->hvf_emul->regs[reg].lx;
+ return x86_reg(env, reg)->lx;
case 2:
- return env->hvf_emul->regs[reg].rx;
+ return x86_reg(env, reg)->rx;
case 4:
- return env->hvf_emul->regs[reg].erx;
+ return x86_reg(env, reg)->erx;
case 8:
- return env->hvf_emul->regs[reg].rrx;
+ return x86_reg(env, reg)->rrx;
default:
abort();
}
@@ -112,16 +112,16 @@ void write_reg(CPUX86State *env, int reg, target_ulong val, int size)
{
switch (size) {
case 1:
- env->hvf_emul->regs[reg].lx = val;
+ x86_reg(env, reg)->lx = val;
break;
case 2:
- env->hvf_emul->regs[reg].rx = val;
+ x86_reg(env, reg)->rx = val;
break;
case 4:
- env->hvf_emul->regs[reg].rrx = (uint32_t)val;
+ x86_reg(env, reg)->rrx = (uint32_t)val;
break;
case 8:
- env->hvf_emul->regs[reg].rrx = val;
+ x86_reg(env, reg)->rrx = val;
break;
default:
abort();
@@ -173,7 +173,7 @@ void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size)
static bool is_host_reg(struct CPUX86State *env, target_ulong ptr)
{
- return (ptr - (target_ulong)&env->hvf_emul->regs[0]) < sizeof(env->hvf_emul->regs);
+ return (ptr - (target_ulong)&env->regs[0]) < sizeof(env->regs);
}
void write_val_ext(struct CPUX86State *env, target_ulong ptr, target_ulong val, int size)
@@ -187,8 +187,8 @@ void write_val_ext(struct CPUX86State *env, target_ulong ptr, target_ulong val,
uint8_t *read_mmio(struct CPUX86State *env, target_ulong ptr, int bytes)
{
- vmx_read_mem(env_cpu(env), env->hvf_emul->mmio_buf, ptr, bytes);
- return env->hvf_emul->mmio_buf;
+ vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, ptr, bytes);
+ return env->hvf_mmio_buf;
}
@@ -267,49 +267,49 @@ static void exec_mov(struct CPUX86State *env, struct x86_decode *decode)
write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
decode->operand_size);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_add(struct CPUX86State *env, struct x86_decode *decode)
{
EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_or(struct CPUX86State *env, struct x86_decode *decode)
{
EXEC_2OP_FLAGS_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_adc(struct CPUX86State *env, struct x86_decode *decode)
{
EXEC_2OP_FLAGS_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_sbb(struct CPUX86State *env, struct x86_decode *decode)
{
EXEC_2OP_FLAGS_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_and(struct CPUX86State *env, struct x86_decode *decode)
{
EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_sub(struct CPUX86State *env, struct x86_decode *decode)
{
EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_xor(struct CPUX86State *env, struct x86_decode *decode)
{
EXEC_2OP_FLAGS_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_neg(struct CPUX86State *env, struct x86_decode *decode)
@@ -332,13 +332,13 @@ static void exec_neg(struct CPUX86State *env, struct x86_decode *decode)
}
/*lflags_to_rflags(env);*/
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_cmp(struct CPUX86State *env, struct x86_decode *decode)
{
EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_inc(struct CPUX86State *env, struct x86_decode *decode)
@@ -348,7 +348,7 @@ static void exec_inc(struct CPUX86State *env, struct x86_decode *decode)
EXEC_2OP_FLAGS_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_dec(struct CPUX86State *env, struct x86_decode *decode)
@@ -357,13 +357,13 @@ static void exec_dec(struct CPUX86State *env, struct x86_decode *decode)
decode->op[1].val = 0;
EXEC_2OP_FLAGS_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_tst(struct CPUX86State *env, struct x86_decode *decode)
{
EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_not(struct CPUX86State *env, struct x86_decode *decode)
@@ -372,7 +372,7 @@ static void exec_not(struct CPUX86State *env, struct x86_decode *decode)
write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val,
decode->operand_size);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
void exec_movzx(struct CPUX86State *env, struct x86_decode *decode)
@@ -392,7 +392,7 @@ void exec_movzx(struct CPUX86State *env, struct x86_decode *decode)
decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size);
write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_out(struct CPUX86State *env, struct x86_decode *decode)
@@ -416,7 +416,7 @@ static void exec_out(struct CPUX86State *env, struct x86_decode *decode)
VM_PANIC("Bad out opcode\n");
break;
}
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_in(struct CPUX86State *env, struct x86_decode *decode)
@@ -452,14 +452,14 @@ static void exec_in(struct CPUX86State *env, struct x86_decode *decode)
break;
}
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static inline void string_increment_reg(struct CPUX86State *env, int reg,
struct x86_decode *decode)
{
target_ulong val = read_reg(env, reg, decode->addressing_size);
- if (env->hvf_emul->rflags.df) {
+ if (env->eflags & DF_MASK) {
val -= decode->operand_size;
} else {
val += decode->operand_size;
@@ -489,9 +489,9 @@ static void exec_ins_single(struct CPUX86State *env, struct x86_decode *decode)
target_ulong addr = linear_addr_size(env_cpu(env), RDI(env),
decode->addressing_size, R_ES);
- hvf_handle_io(env_cpu(env), DX(env), env->hvf_emul->mmio_buf, 0,
+ hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 0,
decode->operand_size, 1);
- vmx_write_mem(env_cpu(env), addr, env->hvf_emul->mmio_buf,
+ vmx_write_mem(env_cpu(env), addr, env->hvf_mmio_buf,
decode->operand_size);
string_increment_reg(env, R_EDI, decode);
@@ -505,16 +505,16 @@ static void exec_ins(struct CPUX86State *env, struct x86_decode *decode)
exec_ins_single(env, decode);
}
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_outs_single(struct CPUX86State *env, struct x86_decode *decode)
{
target_ulong addr = decode_linear_addr(env, decode, RSI(env), R_DS);
- vmx_read_mem(env_cpu(env), env->hvf_emul->mmio_buf, addr,
+ vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, addr,
decode->operand_size);
- hvf_handle_io(env_cpu(env), DX(env), env->hvf_emul->mmio_buf, 1,
+ hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 1,
decode->operand_size, 1);
string_increment_reg(env, R_ESI, decode);
@@ -528,7 +528,7 @@ static void exec_outs(struct CPUX86State *env, struct x86_decode *decode)
exec_outs_single(env, decode);
}
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_movs_single(struct CPUX86State *env, struct x86_decode *decode)
@@ -556,7 +556,7 @@ static void exec_movs(struct CPUX86State *env, struct x86_decode *decode)
exec_movs_single(env, decode);
}
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_cmps_single(struct CPUX86State *env, struct x86_decode *decode)
@@ -586,7 +586,7 @@ static void exec_cmps(struct CPUX86State *env, struct x86_decode *decode)
} else {
exec_cmps_single(env, decode);
}
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
@@ -612,7 +612,7 @@ static void exec_stos(struct CPUX86State *env, struct x86_decode *decode)
exec_stos_single(env, decode);
}
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_scas_single(struct CPUX86State *env, struct x86_decode *decode)
@@ -638,7 +638,7 @@ static void exec_scas(struct CPUX86State *env, struct x86_decode *decode)
exec_scas_single(env, decode);
}
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_lods_single(struct CPUX86State *env, struct x86_decode *decode)
@@ -661,7 +661,7 @@ static void exec_lods(struct CPUX86State *env, struct x86_decode *decode)
exec_lods_single(env, decode);
}
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
void simulate_rdmsr(struct CPUState *cpu)
@@ -758,7 +758,7 @@ void simulate_rdmsr(struct CPUState *cpu)
static void exec_rdmsr(struct CPUX86State *env, struct x86_decode *decode)
{
simulate_rdmsr(env_cpu(env));
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
void simulate_wrmsr(struct CPUState *cpu)
@@ -853,7 +853,7 @@ void simulate_wrmsr(struct CPUState *cpu)
static void exec_wrmsr(struct CPUX86State *env, struct x86_decode *decode)
{
simulate_wrmsr(env_cpu(env));
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
/*
@@ -909,25 +909,25 @@ static void do_bt(struct CPUX86State *env, struct x86_decode *decode, int flag)
static void exec_bt(struct CPUX86State *env, struct x86_decode *decode)
{
do_bt(env, decode, 0);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_btc(struct CPUX86State *env, struct x86_decode *decode)
{
do_bt(env, decode, 1);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_btr(struct CPUX86State *env, struct x86_decode *decode)
{
do_bt(env, decode, 3);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_bts(struct CPUX86State *env, struct x86_decode *decode)
{
do_bt(env, decode, 2);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
void exec_shl(struct CPUX86State *env, struct x86_decode *decode)
@@ -991,7 +991,7 @@ void exec_shl(struct CPUX86State *env, struct x86_decode *decode)
exit:
/* lflags_to_rflags(env); */
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
void exec_movsx(CPUX86State *env, struct x86_decode *decode)
@@ -1014,7 +1014,7 @@ void exec_movsx(CPUX86State *env, struct x86_decode *decode)
write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
void exec_ror(struct CPUX86State *env, struct x86_decode *decode)
@@ -1092,7 +1092,7 @@ void exec_ror(struct CPUX86State *env, struct x86_decode *decode)
break;
}
}
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
void exec_rol(struct CPUX86State *env, struct x86_decode *decode)
@@ -1173,7 +1173,7 @@ void exec_rol(struct CPUX86State *env, struct x86_decode *decode)
break;
}
}
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
@@ -1259,7 +1259,7 @@ void exec_rcl(struct CPUX86State *env, struct x86_decode *decode)
break;
}
}
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
void exec_rcr(struct CPUX86State *env, struct x86_decode *decode)
@@ -1334,7 +1334,7 @@ void exec_rcr(struct CPUX86State *env, struct x86_decode *decode)
break;
}
}
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_xchg(struct CPUX86State *env, struct x86_decode *decode)
@@ -1346,7 +1346,7 @@ static void exec_xchg(struct CPUX86State *env, struct x86_decode *decode)
write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
decode->operand_size);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static void exec_xadd(struct CPUX86State *env, struct x86_decode *decode)
@@ -1355,7 +1355,7 @@ static void exec_xadd(struct CPUX86State *env, struct x86_decode *decode)
write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
decode->operand_size);
- RIP(env) += decode->len;
+ env->eip += decode->len;
}
static struct cmd_handler {
@@ -1432,9 +1432,9 @@ void load_regs(struct CPUState *cpu)
RRX(env, i) = rreg(cpu->hvf_fd, HV_X86_RAX + i);
}
- RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
+ env->eflags = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
rflags_to_lflags(env);
- RIP(env) = rreg(cpu->hvf_fd, HV_X86_RIP);
+ env->eip = rreg(cpu->hvf_fd, HV_X86_RIP);
}
void store_regs(struct CPUState *cpu)
@@ -1456,21 +1456,21 @@ void store_regs(struct CPUState *cpu)
}
lflags_to_rflags(env);
- wreg(cpu->hvf_fd, HV_X86_RFLAGS, RFLAGS(env));
- macvm_set_rip(cpu, RIP(env));
+ wreg(cpu->hvf_fd, HV_X86_RFLAGS, env->eflags);
+ macvm_set_rip(cpu, env->eip);
}
bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins)
{
/*if (hvf_vcpu_id(cpu))
- printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cpu), RIP(cpu),
+ printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cpu), env->eip,
decode_cmd_to_string(ins->cmd));*/
if (!_cmd_handler[ins->cmd].handler) {
- printf("Unimplemented handler (%llx) for %d (%x %x) \n", RIP(env),
+ printf("Unimplemented handler (%llx) for %d (%x %x) \n", env->eip,
ins->cmd, ins->opcode[0],
ins->opcode_len > 1 ? ins->opcode[1] : 0);
- RIP(env) += ins->len;
+ env->eip += ins->len;
return true;
}
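
The hunks above all follow one conversion: the private emulator state hanging off env->hvf_emul is retired in favour of architectural CPUX86State fields, so RIP(env) becomes env->eip, the direction-flag test reads env->eflags & DF_MASK, and sized register accesses go through an x86_reg() accessor that aliases the guest register file in place. A stand-alone sketch of that aliasing idea (stub types rather than the literal QEMU definitions; as written it assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the CPUX86State general register file. */
typedef struct {
    uint64_t regs[16];
} CPUX86StateStub;

/* One union gives byte/word/dword/qword views of the same register. */
typedef union {
    uint8_t  lx;   /* low byte,  e.g. AL  */
    uint16_t rx;   /* word,      e.g. AX  */
    uint32_t erx;  /* dword,     e.g. EAX */
    uint64_t rrx;  /* qword,     e.g. RAX */
} X86Register;

static inline X86Register *x86_reg(CPUX86StateStub *env, int reg)
{
    return (X86Register *)&env->regs[reg];
}

int main(void)
{
    CPUX86StateStub env = { .regs = { 0x1122334455667788ULL } };
    /* On a little-endian host: lx=88 rx=7788 erx=55667788 */
    printf("lx=%02x rx=%04x erx=%08x\n",
           x86_reg(&env, 0)->lx, x86_reg(&env, 0)->rx, x86_reg(&env, 0)->erx);
    return 0;
}
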
diff --git a/target/i386/hvf/x86_flags.c b/target/i386/hvf/x86_flags.c
index ee6d33f861..5ca4f41f5c 100644
--- a/target/i386/hvf/x86_flags.c
+++ b/target/i386/hvf/x86_flags.c
@@ -63,7 +63,7 @@
#define SET_FLAGS_OSZAPC_SIZE(size, lf_carries, lf_result) { \
target_ulong temp = ((lf_carries) & (LF_MASK_AF)) | \
(((lf_carries) >> (size - 2)) << LF_BIT_PO); \
- env->hvf_emul->lflags.result = (target_ulong)(int##size##_t)(lf_result); \
+ env->hvf_lflags.result = (target_ulong)(int##size##_t)(lf_result); \
if ((size) == 32) { \
temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \
} else if ((size) == 16) { \
@@ -73,7 +73,7 @@
} else { \
VM_PANIC("unimplemented"); \
} \
- env->hvf_emul->lflags.auxbits = (target_ulong)(uint32_t)temp; \
+ env->hvf_lflags.auxbits = (target_ulong)(uint32_t)temp; \
}
/* carries, result */
@@ -100,10 +100,10 @@
} else { \
VM_PANIC("unimplemented"); \
} \
- env->hvf_emul->lflags.result = (target_ulong)(int##size##_t)(lf_result); \
- target_ulong delta_c = (env->hvf_emul->lflags.auxbits ^ temp) & LF_MASK_CF; \
+ env->hvf_lflags.result = (target_ulong)(int##size##_t)(lf_result); \
+ target_ulong delta_c = (env->hvf_lflags.auxbits ^ temp) & LF_MASK_CF; \
delta_c ^= (delta_c >> 1); \
- env->hvf_emul->lflags.auxbits = (target_ulong)(uint32_t)(temp ^ delta_c); \
+ env->hvf_lflags.auxbits = (target_ulong)(uint32_t)(temp ^ delta_c); \
}
/* carries, result */
@@ -117,9 +117,8 @@
void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf)
{
uint32_t temp_po = new_of ^ new_cf;
- env->hvf_emul->lflags.auxbits &= ~(LF_MASK_PO | LF_MASK_CF);
- env->hvf_emul->lflags.auxbits |= (temp_po << LF_BIT_PO) |
- (new_cf << LF_BIT_CF);
+ env->hvf_lflags.auxbits &= ~(LF_MASK_PO | LF_MASK_CF);
+ env->hvf_lflags.auxbits |= (temp_po << LF_BIT_PO) | (new_cf << LF_BIT_CF);
}
void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
@@ -215,27 +214,27 @@ void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2,
bool get_PF(CPUX86State *env)
{
- uint32_t temp = (255 & env->hvf_emul->lflags.result);
- temp = temp ^ (255 & (env->hvf_emul->lflags.auxbits >> LF_BIT_PDB));
+ uint32_t temp = (255 & env->hvf_lflags.result);
+ temp = temp ^ (255 & (env->hvf_lflags.auxbits >> LF_BIT_PDB));
temp = (temp ^ (temp >> 4)) & 0x0F;
return (0x9669U >> temp) & 1;
}
void set_PF(CPUX86State *env, bool val)
{
- uint32_t temp = (255 & env->hvf_emul->lflags.result) ^ (!val);
- env->hvf_emul->lflags.auxbits &= ~(LF_MASK_PDB);
- env->hvf_emul->lflags.auxbits |= (temp << LF_BIT_PDB);
+ uint32_t temp = (255 & env->hvf_lflags.result) ^ (!val);
+ env->hvf_lflags.auxbits &= ~(LF_MASK_PDB);
+ env->hvf_lflags.auxbits |= (temp << LF_BIT_PDB);
}
bool get_OF(CPUX86State *env)
{
- return ((env->hvf_emul->lflags.auxbits + (1U << LF_BIT_PO)) >> LF_BIT_CF) & 1;
+ return ((env->hvf_lflags.auxbits + (1U << LF_BIT_PO)) >> LF_BIT_CF) & 1;
}
bool get_CF(CPUX86State *env)
{
- return (env->hvf_emul->lflags.auxbits >> LF_BIT_CF) & 1;
+ return (env->hvf_lflags.auxbits >> LF_BIT_CF) & 1;
}
void set_OF(CPUX86State *env, bool val)
@@ -252,64 +251,64 @@ void set_CF(CPUX86State *env, bool val)
bool get_AF(CPUX86State *env)
{
- return (env->hvf_emul->lflags.auxbits >> LF_BIT_AF) & 1;
+ return (env->hvf_lflags.auxbits >> LF_BIT_AF) & 1;
}
void set_AF(CPUX86State *env, bool val)
{
- env->hvf_emul->lflags.auxbits &= ~(LF_MASK_AF);
- env->hvf_emul->lflags.auxbits |= val << LF_BIT_AF;
+ env->hvf_lflags.auxbits &= ~(LF_MASK_AF);
+ env->hvf_lflags.auxbits |= val << LF_BIT_AF;
}
bool get_ZF(CPUX86State *env)
{
- return !env->hvf_emul->lflags.result;
+ return !env->hvf_lflags.result;
}
void set_ZF(CPUX86State *env, bool val)
{
if (val) {
- env->hvf_emul->lflags.auxbits ^=
- (((env->hvf_emul->lflags.result >> LF_SIGN_BIT) & 1) << LF_BIT_SD);
+ env->hvf_lflags.auxbits ^=
+ (((env->hvf_lflags.result >> LF_SIGN_BIT) & 1) << LF_BIT_SD);
/* merge the parity bits into the Parity Delta Byte */
- uint32_t temp_pdb = (255 & env->hvf_emul->lflags.result);
- env->hvf_emul->lflags.auxbits ^= (temp_pdb << LF_BIT_PDB);
+ uint32_t temp_pdb = (255 & env->hvf_lflags.result);
+ env->hvf_lflags.auxbits ^= (temp_pdb << LF_BIT_PDB);
/* now zero the .result value */
- env->hvf_emul->lflags.result = 0;
+ env->hvf_lflags.result = 0;
} else {
- env->hvf_emul->lflags.result |= (1 << 8);
+ env->hvf_lflags.result |= (1 << 8);
}
}
bool get_SF(CPUX86State *env)
{
- return ((env->hvf_emul->lflags.result >> LF_SIGN_BIT) ^
- (env->hvf_emul->lflags.auxbits >> LF_BIT_SD)) & 1;
+ return ((env->hvf_lflags.result >> LF_SIGN_BIT) ^
+ (env->hvf_lflags.auxbits >> LF_BIT_SD)) & 1;
}
void set_SF(CPUX86State *env, bool val)
{
bool temp_sf = get_SF(env);
- env->hvf_emul->lflags.auxbits ^= (temp_sf ^ val) << LF_BIT_SD;
+ env->hvf_lflags.auxbits ^= (temp_sf ^ val) << LF_BIT_SD;
}
void lflags_to_rflags(CPUX86State *env)
{
- env->hvf_emul->rflags.cf = get_CF(env);
- env->hvf_emul->rflags.pf = get_PF(env);
- env->hvf_emul->rflags.af = get_AF(env);
- env->hvf_emul->rflags.zf = get_ZF(env);
- env->hvf_emul->rflags.sf = get_SF(env);
- env->hvf_emul->rflags.of = get_OF(env);
+ env->eflags |= get_CF(env) ? CC_C : 0;
+ env->eflags |= get_PF(env) ? CC_P : 0;
+ env->eflags |= get_AF(env) ? CC_A : 0;
+ env->eflags |= get_ZF(env) ? CC_Z : 0;
+ env->eflags |= get_SF(env) ? CC_S : 0;
+ env->eflags |= get_OF(env) ? CC_O : 0;
}
void rflags_to_lflags(CPUX86State *env)
{
- env->hvf_emul->lflags.auxbits = env->hvf_emul->lflags.result = 0;
- set_OF(env, env->hvf_emul->rflags.of);
- set_SF(env, env->hvf_emul->rflags.sf);
- set_ZF(env, env->hvf_emul->rflags.zf);
- set_AF(env, env->hvf_emul->rflags.af);
- set_PF(env, env->hvf_emul->rflags.pf);
- set_CF(env, env->hvf_emul->rflags.cf);
+ env->hvf_lflags.auxbits = env->hvf_lflags.result = 0;
+ set_OF(env, env->eflags & CC_O);
+ set_SF(env, env->eflags & CC_S);
+ set_ZF(env, env->eflags & CC_Z);
+ set_AF(env, env->eflags & CC_A);
+ set_PF(env, env->eflags & CC_P);
+ set_CF(env, env->eflags & CC_C);
}
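
x86_flags.c keeps the same lazy-flags design and only relocates the state from env->hvf_emul->lflags to env->hvf_lflags: rather than materialising RFLAGS after every instruction, the emulator records the last ALU result (from which ZF, SF and PF are derived on demand) plus packed carry/overflow bits in auxbits, converting to and from real EFLAGS bits only in lflags_to_rflags()/rflags_to_lflags(). A toy model of the on-demand derivation (simplified, assumed layout; the real bit positions are the LF_BIT_* constants used above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_SIGN_BIT 31  /* assumed 32-bit result width for this model */

typedef struct {
    uint32_t result;   /* last ALU result */
    uint32_t auxbits;  /* packed carry/parity/sign-delta bits (unused here) */
} ToyLazyFlags;

static bool toy_get_ZF(const ToyLazyFlags *lf) { return lf->result == 0; }
static bool toy_get_SF(const ToyLazyFlags *lf)
{
    return (lf->result >> TOY_SIGN_BIT) & 1;  /* sign delta omitted */
}

int main(void)
{
    ToyLazyFlags lf = { .result = (uint32_t)-5 };
    printf("ZF=%d SF=%d\n", toy_get_ZF(&lf), toy_get_SF(&lf)); /* ZF=0 SF=1 */
    lf.result = 0;
    printf("ZF=%d SF=%d\n", toy_get_ZF(&lf), toy_get_SF(&lf)); /* ZF=1 SF=0 */
    return 0;
}
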
diff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c
index 1daac6cc2b..6f04478b3a 100644
--- a/target/i386/hvf/x86_task.c
+++ b/target/i386/hvf/x86_task.c
@@ -38,8 +38,8 @@ static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
CPUX86State *env = &x86_cpu->env;
/* CR3 and ldt selector are not saved intentionally */
- tss->eip = EIP(env);
- tss->eflags = EFLAGS(env);
+ tss->eip = (uint32_t)env->eip;
+ tss->eflags = (uint32_t)env->eflags;
tss->eax = EAX(env);
tss->ecx = ECX(env);
tss->edx = EDX(env);
@@ -64,8 +64,8 @@ static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, tss->cr3);
- RIP(env) = tss->eip;
- EFLAGS(env) = tss->eflags | 2;
+ env->eip = tss->eip;
+ env->eflags = tss->eflags | 2;
/* General purpose registers */
RAX(env) = tss->eax;
@@ -158,7 +158,7 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
}
if (reason == TSR_IRET)
- EFLAGS(env) &= ~RFLAGS_NT;
+ env->eflags &= ~NT_MASK;
if (reason != TSR_CALL && reason != TSR_IDT_GATE)
old_tss_sel.sel = 0xffff;
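
One detail worth noting in load_state_from_tss32() above: EFLAGS bit 1 is a reserved, always-one bit, which is why the flags restored from the TSS are or'ed with 2. A one-line illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t tss_eflags = 0x00000200;     /* IF set, reserved bit 1 clear */
    uint32_t eflags = tss_eflags | 2;     /* force the always-one bit */
    printf("eflags = 0x%08x\n", eflags);  /* 0x00000202 */
    return 0;
}
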
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index edefe5319a..5cbcb32ab6 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -412,7 +412,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
(cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
- (EFLAGS(env) & IF_MASK) && !(info & VMCS_INTR_VALID)) {
+ (env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) {
int line = cpu_get_pic_interrupt(&x86cpu->env);
cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
if (line >= 0) {
@@ -432,7 +432,7 @@ int hvf_process_events(CPUState *cpu_state)
X86CPU *cpu = X86_CPU(cpu_state);
CPUX86State *env = &cpu->env;
- EFLAGS(env) = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
+ env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
hvf_cpu_synchronize_state(cpu_state);
@@ -444,7 +444,7 @@ int hvf_process_events(CPUState *cpu_state)
apic_poll_irq(cpu->apic_state);
}
if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
- (EFLAGS(env) & IF_MASK)) ||
+ (env->eflags & IF_MASK)) ||
(cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
cpu_state->halted = 0;
}
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 34f838728d..b3c13cb898 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -59,6 +59,10 @@
do { } while (0)
#endif
+/* From arch/x86/kvm/lapic.h */
+#define KVM_APIC_BUS_CYCLE_NS 1
+#define KVM_APIC_BUS_FREQUENCY (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)
+
#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12
@@ -106,6 +110,7 @@ static bool has_msr_core_capabs;
static bool has_msr_vmx_vmfunc;
static bool has_msr_ucode_rev;
static bool has_msr_vmx_procbased_ctls2;
+static bool has_msr_perf_capabs;
static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
@@ -1469,6 +1474,8 @@ int kvm_arch_init_vcpu(CPUState *cs)
}
}
+ env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY;
+
/* Paravirtualization CPUIDs */
r = hyperv_handle_properties(cs, cpuid_data.entries);
if (r < 0) {
@@ -1735,7 +1742,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
}
}
- qemu_add_vm_change_state_handler(cpu_update_state, env);
+ cpu->vmsentry = qemu_add_vm_change_state_handler(cpu_update_state, env);
c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
if (c) {
@@ -1773,9 +1780,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
c = &cpuid_data.entries[cpuid_i++];
c->function = KVM_CPUID_SIGNATURE | 0x10;
c->eax = env->tsc_khz;
- /* LAPIC resolution of 1ns (freq: 1GHz) is hardcoded in KVM's
- * APIC_BUS_CYCLE_NS */
- c->ebx = 1000000;
+ c->ebx = env->apic_bus_freq / 1000; /* Hz to KHz */
c->ecx = c->edx = 0;
c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
@@ -1848,6 +1853,8 @@ int kvm_arch_destroy_vcpu(CPUState *cs)
env->nested_state = NULL;
}
+ qemu_del_vm_change_state_handler(cpu->vmsentry);
+
return 0;
}
@@ -2027,6 +2034,9 @@ static int kvm_get_supported_msrs(KVMState *s)
case MSR_IA32_CORE_CAPABILITY:
has_msr_core_capabs = true;
break;
+ case MSR_IA32_PERF_CAPABILITIES:
+ has_msr_perf_capabs = true;
+ break;
case MSR_IA32_VMX_VMFUNC:
has_msr_vmx_vmfunc = true;
break;
@@ -2643,6 +2653,18 @@ static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f)
VMCS12_MAX_FIELD_INDEX << 1);
}
+static void kvm_msr_entry_add_perf(X86CPU *cpu, FeatureWordArray f)
+{
+ uint64_t kvm_perf_cap =
+ kvm_arch_get_supported_msr_feature(kvm_state,
+ MSR_IA32_PERF_CAPABILITIES);
+
+ if (kvm_perf_cap) {
+ kvm_msr_entry_add(cpu, MSR_IA32_PERF_CAPABILITIES,
+ kvm_perf_cap & f[FEAT_PERF_CAPABILITIES]);
+ }
+}
+
static int kvm_buf_set_msrs(X86CPU *cpu)
{
int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
@@ -2675,6 +2697,10 @@ static void kvm_init_msrs(X86CPU *cpu)
env->features[FEAT_CORE_CAPABILITY]);
}
+ if (has_msr_perf_capabs && cpu->enable_pmu) {
+ kvm_msr_entry_add_perf(cpu, env->features);
+ }
+
if (has_msr_ucode_rev) {
kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
}
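
The new KVM_APIC_BUS_FREQUENCY constant is behaviour-preserving for the KVM_CPUID_SIGNATURE | 0x10 timing leaf: with KVM_APIC_BUS_CYCLE_NS equal to 1 the bus frequency is 1 GHz, so env->apic_bus_freq / 1000 reproduces the 1000000 kHz value that used to be hard-coded into c->ebx. A compile-and-run sanity check of the arithmetic:

#include <assert.h>
#include <stdint.h>

#define KVM_APIC_BUS_CYCLE_NS 1
#define KVM_APIC_BUS_FREQUENCY (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)

int main(void)
{
    uint64_t ebx = KVM_APIC_BUS_FREQUENCY / 1000;  /* Hz to kHz */
    assert(ebx == 1000000);                        /* matches the old constant */
    return 0;
}
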
diff --git a/target/i386/misc_helper.c b/target/i386/misc_helper.c
index 7d61221024..b6b1d41b14 100644
--- a/target/i386/misc_helper.c
+++ b/target/i386/misc_helper.c
@@ -70,7 +70,7 @@ target_ulong helper_inw(CPUX86State *env, uint32_t port)
void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
#ifdef CONFIG_USER_ONLY
- fprintf(stderr, "outw: port=0x%04x, data=%08x\n", port, data);
+ fprintf(stderr, "outl: port=0x%04x, data=%08x\n", port, data);
#else
address_space_stl(&address_space_io, port, data,
cpu_get_mem_attrs(env), NULL);
diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h
index 4658768de2..14f2b16abd 100644
--- a/target/i386/ops_sse.h
+++ b/target/i386/ops_sse.h
@@ -1435,34 +1435,47 @@ void glue(helper_pshufb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
void glue(helper_phaddw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
{
- d->W(0) = (int16_t)d->W(0) + (int16_t)d->W(1);
- d->W(1) = (int16_t)d->W(2) + (int16_t)d->W(3);
- XMM_ONLY(d->W(2) = (int16_t)d->W(4) + (int16_t)d->W(5));
- XMM_ONLY(d->W(3) = (int16_t)d->W(6) + (int16_t)d->W(7));
- d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) + (int16_t)s->W(1);
- d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) + (int16_t)s->W(3);
- XMM_ONLY(d->W(6) = (int16_t)s->W(4) + (int16_t)s->W(5));
- XMM_ONLY(d->W(7) = (int16_t)s->W(6) + (int16_t)s->W(7));
+ Reg r;
+
+ r.W(0) = (int16_t)d->W(0) + (int16_t)d->W(1);
+ r.W(1) = (int16_t)d->W(2) + (int16_t)d->W(3);
+ XMM_ONLY(r.W(2) = (int16_t)d->W(4) + (int16_t)d->W(5));
+ XMM_ONLY(r.W(3) = (int16_t)d->W(6) + (int16_t)d->W(7));
+ r.W((2 << SHIFT) + 0) = (int16_t)s->W(0) + (int16_t)s->W(1);
+ r.W((2 << SHIFT) + 1) = (int16_t)s->W(2) + (int16_t)s->W(3);
+ XMM_ONLY(r.W(6) = (int16_t)s->W(4) + (int16_t)s->W(5));
+ XMM_ONLY(r.W(7) = (int16_t)s->W(6) + (int16_t)s->W(7));
+
+ *d = r;
}
void glue(helper_phaddd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
{
- d->L(0) = (int32_t)d->L(0) + (int32_t)d->L(1);
- XMM_ONLY(d->L(1) = (int32_t)d->L(2) + (int32_t)d->L(3));
- d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) + (int32_t)s->L(1);
- XMM_ONLY(d->L(3) = (int32_t)s->L(2) + (int32_t)s->L(3));
+ Reg r;
+
+ r.L(0) = (int32_t)d->L(0) + (int32_t)d->L(1);
+ XMM_ONLY(r.L(1) = (int32_t)d->L(2) + (int32_t)d->L(3));
+ r.L((1 << SHIFT) + 0) = (int32_t)s->L(0) + (int32_t)s->L(1);
+ XMM_ONLY(r.L(3) = (int32_t)s->L(2) + (int32_t)s->L(3));
+
+ *d = r;
}
void glue(helper_phaddsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
{
- d->W(0) = satsw((int16_t)d->W(0) + (int16_t)d->W(1));
- d->W(1) = satsw((int16_t)d->W(2) + (int16_t)d->W(3));
- XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) + (int16_t)d->W(5)));
- XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) + (int16_t)d->W(7)));
- d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) + (int16_t)s->W(1));
- d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) + (int16_t)s->W(3));
- XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) + (int16_t)s->W(5)));
- XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) + (int16_t)s->W(7)));
+ Reg r;
+
+ r.W(0) = satsw((int16_t)d->W(0) + (int16_t)d->W(1));
+ r.W(1) = satsw((int16_t)d->W(2) + (int16_t)d->W(3));
+ XMM_ONLY(r.W(2) = satsw((int16_t)d->W(4) + (int16_t)d->W(5)));
+ XMM_ONLY(r.W(3) = satsw((int16_t)d->W(6) + (int16_t)d->W(7)));
+ r.W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) + (int16_t)s->W(1));
+ r.W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) + (int16_t)s->W(3));
+ XMM_ONLY(r.W(6) = satsw((int16_t)s->W(4) + (int16_t)s->W(5)));
+ XMM_ONLY(r.W(7) = satsw((int16_t)s->W(6) + (int16_t)s->W(7)));
+
+ *d = r;
}
void glue(helper_pmaddubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
@@ -2076,10 +2089,10 @@ static inline unsigned pcmpxstrx(CPUX86State *env, Reg *d, Reg *s,
res = (2 << upper) - 1;
break;
}
- for (j = valids - validd; j >= 0; j--) {
+ for (j = valids == upper ? valids : valids - validd; j >= 0; j--) {
res <<= 1;
v = 1;
- for (i = validd; i >= 0; i--) {
+ for (i = MIN(valids - j, validd); i >= 0; i--) {
v &= (pcmp_val(s, ctrl, i + j) == pcmp_val(d, ctrl, i));
}
res |= v;
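
The phaddw/phaddd/phaddsw rewrite fixes in-place execution: the helpers can be called with d == s, and the old code overwrote d's low lanes before reading them back through s. Accumulating into a local Reg and assigning once removes the aliasing hazard. A four-lane toy reproduction (stub Reg type, for illustration only):

#include <stdint.h>
#include <stdio.h>

typedef struct { int16_t W[4]; } Reg;

static void phadd_buggy(Reg *d, Reg *s)
{
    d->W[0] = d->W[0] + d->W[1];
    d->W[1] = d->W[2] + d->W[3];
    d->W[2] = s->W[0] + s->W[1];  /* if s == d, reads already-clobbered lanes */
    d->W[3] = s->W[2] + s->W[3];
}

static void phadd_fixed(Reg *d, Reg *s)
{
    Reg r;
    r.W[0] = d->W[0] + d->W[1];
    r.W[1] = d->W[2] + d->W[3];
    r.W[2] = s->W[0] + s->W[1];
    r.W[3] = s->W[2] + s->W[3];
    *d = r;  /* single write-back, safe when d == s */
}

int main(void)
{
    Reg a = { { 1, 2, 3, 4 } }, b = a;
    phadd_buggy(&a, &a);
    phadd_fixed(&b, &b);
    printf("buggy: %d %d %d %d\n", a.W[0], a.W[1], a.W[2], a.W[3]); /* 3 7 10 14 */
    printf("fixed: %d %d %d %d\n", b.W[0], b.W[1], b.W[2], b.W[3]); /* 3 7 3 7 */
    return 0;
}

The pcmpxstrx hunk is a separate fix in the same file: the loop bounds change so partial matches at the end of the valid data are also tested, which the new run-test-i386-pcmpistri target further down exercises under -cpu max.
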
diff --git a/target/i386/sev.c b/target/i386/sev.c
index 51cdbe5496..d273174ad3 100644
--- a/target/i386/sev.c
+++ b/target/i386/sev.c
@@ -29,10 +29,48 @@
#include "trace.h"
#include "migration/blocker.h"
+#define TYPE_SEV_GUEST "sev-guest"
+#define SEV_GUEST(obj) \
+ OBJECT_CHECK(SevGuestState, (obj), TYPE_SEV_GUEST)
+
+typedef struct SevGuestState SevGuestState;
+
+/**
+ * SevGuestState:
+ *
+ * The SevGuestState object is used for creating and managing a SEV
+ * guest.
+ *
+ * # $QEMU \
+ * -object sev-guest,id=sev0 \
+ * -machine ...,memory-encryption=sev0
+ */
+struct SevGuestState {
+ Object parent_obj;
+
+ /* configuration parameters */
+ char *sev_device;
+ uint32_t policy;
+ char *dh_cert_file;
+ char *session_file;
+ uint32_t cbitpos;
+ uint32_t reduced_phys_bits;
+
+ /* runtime state */
+ uint32_t handle;
+ uint8_t api_major;
+ uint8_t api_minor;
+ uint8_t build_id;
+ uint64_t me_mask;
+ int sev_fd;
+ SevState state;
+ gchar *measurement;
+};
+
#define DEFAULT_GUEST_POLICY 0x1 /* disable debug */
#define DEFAULT_SEV_DEVICE "/dev/sev"
-static SEVState *sev_state;
+static SevGuestState *sev_guest;
static Error *sev_mig_blocker;
static const char *const sev_fw_errlist[] = {
@@ -111,21 +149,21 @@ fw_error_to_str(int code)
}
static bool
-sev_check_state(SevState state)
+sev_check_state(const SevGuestState *sev, SevState state)
{
- assert(sev_state);
- return sev_state->state == state ? true : false;
+ assert(sev);
+ return sev->state == state ? true : false;
}
static void
-sev_set_guest_state(SevState new_state)
+sev_set_guest_state(SevGuestState *sev, SevState new_state)
{
assert(new_state < SEV_STATE__MAX);
- assert(sev_state);
+ assert(sev);
- trace_kvm_sev_change_state(SevState_str(sev_state->state),
+ trace_kvm_sev_change_state(SevState_str(sev->state),
SevState_str(new_state));
- sev_state->state = new_state;
+ sev->state = new_state;
}
static void
@@ -191,82 +229,82 @@ static struct RAMBlockNotifier sev_ram_notifier = {
};
static void
-qsev_guest_finalize(Object *obj)
+sev_guest_finalize(Object *obj)
{
}
static char *
-qsev_guest_get_session_file(Object *obj, Error **errp)
+sev_guest_get_session_file(Object *obj, Error **errp)
{
- QSevGuestInfo *s = QSEV_GUEST_INFO(obj);
+ SevGuestState *s = SEV_GUEST(obj);
return s->session_file ? g_strdup(s->session_file) : NULL;
}
static void
-qsev_guest_set_session_file(Object *obj, const char *value, Error **errp)
+sev_guest_set_session_file(Object *obj, const char *value, Error **errp)
{
- QSevGuestInfo *s = QSEV_GUEST_INFO(obj);
+ SevGuestState *s = SEV_GUEST(obj);
s->session_file = g_strdup(value);
}
static char *
-qsev_guest_get_dh_cert_file(Object *obj, Error **errp)
+sev_guest_get_dh_cert_file(Object *obj, Error **errp)
{
- QSevGuestInfo *s = QSEV_GUEST_INFO(obj);
+ SevGuestState *s = SEV_GUEST(obj);
return g_strdup(s->dh_cert_file);
}
static void
-qsev_guest_set_dh_cert_file(Object *obj, const char *value, Error **errp)
+sev_guest_set_dh_cert_file(Object *obj, const char *value, Error **errp)
{
- QSevGuestInfo *s = QSEV_GUEST_INFO(obj);
+ SevGuestState *s = SEV_GUEST(obj);
s->dh_cert_file = g_strdup(value);
}
static char *
-qsev_guest_get_sev_device(Object *obj, Error **errp)
+sev_guest_get_sev_device(Object *obj, Error **errp)
{
- QSevGuestInfo *sev = QSEV_GUEST_INFO(obj);
+ SevGuestState *sev = SEV_GUEST(obj);
return g_strdup(sev->sev_device);
}
static void
-qsev_guest_set_sev_device(Object *obj, const char *value, Error **errp)
+sev_guest_set_sev_device(Object *obj, const char *value, Error **errp)
{
- QSevGuestInfo *sev = QSEV_GUEST_INFO(obj);
+ SevGuestState *sev = SEV_GUEST(obj);
sev->sev_device = g_strdup(value);
}
static void
-qsev_guest_class_init(ObjectClass *oc, void *data)
+sev_guest_class_init(ObjectClass *oc, void *data)
{
object_class_property_add_str(oc, "sev-device",
- qsev_guest_get_sev_device,
- qsev_guest_set_sev_device);
+ sev_guest_get_sev_device,
+ sev_guest_set_sev_device);
object_class_property_set_description(oc, "sev-device",
"SEV device to use");
object_class_property_add_str(oc, "dh-cert-file",
- qsev_guest_get_dh_cert_file,
- qsev_guest_set_dh_cert_file);
+ sev_guest_get_dh_cert_file,
+ sev_guest_set_dh_cert_file);
object_class_property_set_description(oc, "dh-cert-file",
"guest owners DH certificate (encoded with base64)");
object_class_property_add_str(oc, "session-file",
- qsev_guest_get_session_file,
- qsev_guest_set_session_file);
+ sev_guest_get_session_file,
+ sev_guest_set_session_file);
object_class_property_set_description(oc, "session-file",
"guest owners session parameters (encoded with base64)");
}
static void
-qsev_guest_init(Object *obj)
+sev_guest_instance_init(Object *obj)
{
- QSevGuestInfo *sev = QSEV_GUEST_INFO(obj);
+ SevGuestState *sev = SEV_GUEST(obj);
sev->sev_device = g_strdup(DEFAULT_SEV_DEVICE);
sev->policy = DEFAULT_GUEST_POLICY;
@@ -282,33 +320,32 @@ qsev_guest_init(Object *obj)
}
/* sev guest info */
-static const TypeInfo qsev_guest_info = {
+static const TypeInfo sev_guest_info = {
.parent = TYPE_OBJECT,
- .name = TYPE_QSEV_GUEST_INFO,
- .instance_size = sizeof(QSevGuestInfo),
- .instance_finalize = qsev_guest_finalize,
- .class_size = sizeof(QSevGuestInfoClass),
- .class_init = qsev_guest_class_init,
- .instance_init = qsev_guest_init,
+ .name = TYPE_SEV_GUEST,
+ .instance_size = sizeof(SevGuestState),
+ .instance_finalize = sev_guest_finalize,
+ .class_init = sev_guest_class_init,
+ .instance_init = sev_guest_instance_init,
.interfaces = (InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
};
-static QSevGuestInfo *
+static SevGuestState *
lookup_sev_guest_info(const char *id)
{
Object *obj;
- QSevGuestInfo *info;
+ SevGuestState *info;
obj = object_resolve_path_component(object_get_objects_root(), id);
if (!obj) {
return NULL;
}
- info = (QSevGuestInfo *)
- object_dynamic_cast(obj, TYPE_QSEV_GUEST_INFO);
+ info = (SevGuestState *)
+ object_dynamic_cast(obj, TYPE_SEV_GUEST);
if (!info) {
return NULL;
}
@@ -319,25 +356,25 @@ lookup_sev_guest_info(const char *id)
bool
sev_enabled(void)
{
- return sev_state ? true : false;
+ return !!sev_guest;
}
uint64_t
sev_get_me_mask(void)
{
- return sev_state ? sev_state->me_mask : ~0;
+ return sev_guest ? sev_guest->me_mask : ~0;
}
uint32_t
sev_get_cbit_position(void)
{
- return sev_state ? sev_state->cbitpos : 0;
+ return sev_guest ? sev_guest->cbitpos : 0;
}
uint32_t
sev_get_reduced_phys_bits(void)
{
- return sev_state ? sev_state->reduced_phys_bits : 0;
+ return sev_guest ? sev_guest->reduced_phys_bits : 0;
}
SevInfo *
@@ -346,15 +383,15 @@ sev_get_info(void)
SevInfo *info;
info = g_new0(SevInfo, 1);
- info->enabled = sev_state ? true : false;
+ info->enabled = sev_enabled();
if (info->enabled) {
- info->api_major = sev_state->api_major;
- info->api_minor = sev_state->api_minor;
- info->build_id = sev_state->build_id;
- info->policy = sev_state->policy;
- info->state = sev_state->state;
- info->handle = sev_state->handle;
+ info->api_major = sev_guest->api_major;
+ info->api_minor = sev_guest->api_minor;
+ info->build_id = sev_guest->build_id;
+ info->policy = sev_guest->policy;
+ info->state = sev_guest->state;
+ info->handle = sev_guest->handle;
}
return info;
@@ -462,21 +499,18 @@ sev_read_file_base64(const char *filename, guchar **data, gsize *len)
}
static int
-sev_launch_start(SEVState *s)
+sev_launch_start(SevGuestState *sev)
{
gsize sz;
int ret = 1;
int fw_error, rc;
- QSevGuestInfo *sev = s->sev_info;
struct kvm_sev_launch_start *start;
guchar *session = NULL, *dh_cert = NULL;
start = g_new0(struct kvm_sev_launch_start, 1);
- start->handle = object_property_get_int(OBJECT(sev), "handle",
- &error_abort);
- start->policy = object_property_get_int(OBJECT(sev), "policy",
- &error_abort);
+ start->handle = sev->handle;
+ start->policy = sev->policy;
if (sev->session_file) {
if (sev_read_file_base64(sev->session_file, &session, &sz) < 0) {
goto out;
@@ -494,18 +528,15 @@ sev_launch_start(SEVState *s)
}
trace_kvm_sev_launch_start(start->policy, session, dh_cert);
- rc = sev_ioctl(s->sev_fd, KVM_SEV_LAUNCH_START, start, &fw_error);
+ rc = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_START, start, &fw_error);
if (rc < 0) {
error_report("%s: LAUNCH_START ret=%d fw_error=%d '%s'",
__func__, ret, fw_error, fw_error_to_str(fw_error));
goto out;
}
- object_property_set_int(OBJECT(sev), start->handle, "handle",
- &error_abort);
- sev_set_guest_state(SEV_STATE_LAUNCH_UPDATE);
- s->handle = start->handle;
- s->policy = start->policy;
+ sev_set_guest_state(sev, SEV_STATE_LAUNCH_UPDATE);
+ sev->handle = start->handle;
ret = 0;
out:
@@ -516,7 +547,7 @@ out:
}
static int
-sev_launch_update_data(uint8_t *addr, uint64_t len)
+sev_launch_update_data(SevGuestState *sev, uint8_t *addr, uint64_t len)
{
int ret, fw_error;
struct kvm_sev_launch_update_data update;
@@ -528,7 +559,7 @@ sev_launch_update_data(uint8_t *addr, uint64_t len)
update.uaddr = (__u64)(unsigned long)addr;
update.len = len;
trace_kvm_sev_launch_update_data(addr, len);
- ret = sev_ioctl(sev_state->sev_fd, KVM_SEV_LAUNCH_UPDATE_DATA,
+ ret = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_UPDATE_DATA,
&update, &fw_error);
if (ret) {
error_report("%s: LAUNCH_UPDATE ret=%d fw_error=%d '%s'",
@@ -541,19 +572,19 @@ sev_launch_update_data(uint8_t *addr, uint64_t len)
static void
sev_launch_get_measure(Notifier *notifier, void *unused)
{
+ SevGuestState *sev = sev_guest;
int ret, error;
guchar *data;
- SEVState *s = sev_state;
struct kvm_sev_launch_measure *measurement;
- if (!sev_check_state(SEV_STATE_LAUNCH_UPDATE)) {
+ if (!sev_check_state(sev, SEV_STATE_LAUNCH_UPDATE)) {
return;
}
measurement = g_new0(struct kvm_sev_launch_measure, 1);
/* query the measurement blob length */
- ret = sev_ioctl(sev_state->sev_fd, KVM_SEV_LAUNCH_MEASURE,
+ ret = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_MEASURE,
measurement, &error);
if (!measurement->len) {
error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
@@ -565,7 +596,7 @@ sev_launch_get_measure(Notifier *notifier, void *unused)
measurement->uaddr = (unsigned long)data;
/* get the measurement blob */
- ret = sev_ioctl(sev_state->sev_fd, KVM_SEV_LAUNCH_MEASURE,
+ ret = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_MEASURE,
measurement, &error);
if (ret) {
error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
@@ -573,11 +604,11 @@ sev_launch_get_measure(Notifier *notifier, void *unused)
goto free_data;
}
- sev_set_guest_state(SEV_STATE_LAUNCH_SECRET);
+ sev_set_guest_state(sev, SEV_STATE_LAUNCH_SECRET);
/* encode the measurement value and emit the event */
- s->measurement = g_base64_encode(data, measurement->len);
- trace_kvm_sev_launch_measurement(s->measurement);
+ sev->measurement = g_base64_encode(data, measurement->len);
+ trace_kvm_sev_launch_measurement(sev->measurement);
free_data:
g_free(data);
@@ -588,9 +619,9 @@ free_measurement:
char *
sev_get_launch_measurement(void)
{
- if (sev_state &&
- sev_state->state >= SEV_STATE_LAUNCH_SECRET) {
- return g_strdup(sev_state->measurement);
+ if (sev_guest &&
+ sev_guest->state >= SEV_STATE_LAUNCH_SECRET) {
+ return g_strdup(sev_guest->measurement);
}
return NULL;
@@ -601,20 +632,20 @@ static Notifier sev_machine_done_notify = {
};
static void
-sev_launch_finish(SEVState *s)
+sev_launch_finish(SevGuestState *sev)
{
int ret, error;
Error *local_err = NULL;
trace_kvm_sev_launch_finish();
- ret = sev_ioctl(sev_state->sev_fd, KVM_SEV_LAUNCH_FINISH, 0, &error);
+ ret = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_FINISH, 0, &error);
if (ret) {
error_report("%s: LAUNCH_FINISH ret=%d fw_error=%d '%s'",
__func__, ret, error, fw_error_to_str(error));
exit(1);
}
- sev_set_guest_state(SEV_STATE_RUNNING);
+ sev_set_guest_state(sev, SEV_STATE_RUNNING);
/* add migration blocker */
error_setg(&sev_mig_blocker,
@@ -630,11 +661,11 @@ sev_launch_finish(SEVState *s)
static void
sev_vm_state_change(void *opaque, int running, RunState state)
{
- SEVState *s = opaque;
+ SevGuestState *sev = opaque;
if (running) {
- if (!sev_check_state(SEV_STATE_RUNNING)) {
- sev_launch_finish(s);
+ if (!sev_check_state(sev, SEV_STATE_RUNNING)) {
+ sev_launch_finish(sev);
}
}
}
@@ -642,55 +673,52 @@ sev_vm_state_change(void *opaque, int running, RunState state)
void *
sev_guest_init(const char *id)
{
- SEVState *s;
+ SevGuestState *sev;
char *devname;
int ret, fw_error;
uint32_t ebx;
uint32_t host_cbitpos;
struct sev_user_data_status status = {};
- sev_state = s = g_new0(SEVState, 1);
- s->sev_info = lookup_sev_guest_info(id);
- if (!s->sev_info) {
+ sev = lookup_sev_guest_info(id);
+ if (!sev) {
error_report("%s: '%s' is not a valid '%s' object",
- __func__, id, TYPE_QSEV_GUEST_INFO);
+ __func__, id, TYPE_SEV_GUEST);
goto err;
}
- s->state = SEV_STATE_UNINIT;
+ sev_guest = sev;
+ sev->state = SEV_STATE_UNINIT;
host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL);
host_cbitpos = ebx & 0x3f;
- s->cbitpos = object_property_get_int(OBJECT(s->sev_info), "cbitpos", NULL);
- if (host_cbitpos != s->cbitpos) {
+ if (host_cbitpos != sev->cbitpos) {
error_report("%s: cbitpos check failed, host '%d' requested '%d'",
- __func__, host_cbitpos, s->cbitpos);
+ __func__, host_cbitpos, sev->cbitpos);
goto err;
}
- s->reduced_phys_bits = object_property_get_int(OBJECT(s->sev_info),
- "reduced-phys-bits", NULL);
- if (s->reduced_phys_bits < 1) {
+ if (sev->reduced_phys_bits < 1) {
error_report("%s: reduced_phys_bits check failed, it should be >=1,"
- " requested '%d'", __func__, s->reduced_phys_bits);
+ " requested '%d'", __func__, sev->reduced_phys_bits);
goto err;
}
- s->me_mask = ~(1UL << s->cbitpos);
+ sev->me_mask = ~(1UL << sev->cbitpos);
- devname = object_property_get_str(OBJECT(s->sev_info), "sev-device", NULL);
- s->sev_fd = open(devname, O_RDWR);
- if (s->sev_fd < 0) {
+ devname = object_property_get_str(OBJECT(sev), "sev-device", NULL);
+ sev->sev_fd = open(devname, O_RDWR);
+ if (sev->sev_fd < 0) {
error_report("%s: Failed to open %s '%s'", __func__,
devname, strerror(errno));
}
g_free(devname);
- if (s->sev_fd < 0) {
+ if (sev->sev_fd < 0) {
goto err;
}
- ret = sev_platform_ioctl(s->sev_fd, SEV_PLATFORM_STATUS, &status,
+ ret = sev_platform_ioctl(sev->sev_fd, SEV_PLATFORM_STATUS, &status,
&fw_error);
if (ret) {
error_report("%s: failed to get platform status ret=%d "
@@ -698,19 +726,19 @@ sev_guest_init(const char *id)
fw_error_to_str(fw_error));
goto err;
}
- s->build_id = status.build;
- s->api_major = status.api_major;
- s->api_minor = status.api_minor;
+ sev->build_id = status.build;
+ sev->api_major = status.api_major;
+ sev->api_minor = status.api_minor;
trace_kvm_sev_init();
- ret = sev_ioctl(s->sev_fd, KVM_SEV_INIT, NULL, &fw_error);
+ ret = sev_ioctl(sev->sev_fd, KVM_SEV_INIT, NULL, &fw_error);
if (ret) {
error_report("%s: failed to initialize ret=%d fw_error=%d '%s'",
__func__, ret, fw_error, fw_error_to_str(fw_error));
goto err;
}
- ret = sev_launch_start(s);
+ ret = sev_launch_start(sev);
if (ret) {
error_report("%s: failed to create encryption context", __func__);
goto err;
@@ -718,23 +746,24 @@ sev_guest_init(const char *id)
ram_block_notifier_add(&sev_ram_notifier);
qemu_add_machine_init_done_notifier(&sev_machine_done_notify);
- qemu_add_vm_change_state_handler(sev_vm_state_change, s);
+ qemu_add_vm_change_state_handler(sev_vm_state_change, sev);
- return s;
+ return sev;
err:
- g_free(sev_state);
- sev_state = NULL;
+ sev_guest = NULL;
return NULL;
}
int
sev_encrypt_data(void *handle, uint8_t *ptr, uint64_t len)
{
- assert(handle);
+ SevGuestState *sev = handle;
+
+ assert(sev);
/* if SEV is in update state then encrypt the data else do nothing */
- if (sev_check_state(SEV_STATE_LAUNCH_UPDATE)) {
- return sev_launch_update_data(ptr, len);
+ if (sev_check_state(sev, SEV_STATE_LAUNCH_UPDATE)) {
+ return sev_launch_update_data(sev, ptr, len);
}
return 0;
@@ -743,7 +772,7 @@ sev_encrypt_data(void *handle, uint8_t *ptr, uint64_t len)
static void
sev_register_types(void)
{
- type_register_static(&qsev_guest_info);
+ type_register_static(&sev_guest_info);
}
type_init(sev_register_types);
diff --git a/target/i386/sev_i386.h b/target/i386/sev_i386.h
index 8ada9d385d..8eb7de1bef 100644
--- a/target/i386/sev_i386.h
+++ b/target/i386/sev_i386.h
@@ -28,10 +28,6 @@
#define SEV_POLICY_DOMAIN 0x10
#define SEV_POLICY_SEV 0x20
-#define TYPE_QSEV_GUEST_INFO "sev-guest"
-#define QSEV_GUEST_INFO(obj) \
- OBJECT_CHECK(QSevGuestInfo, (obj), TYPE_QSEV_GUEST_INFO)
-
extern bool sev_enabled(void);
extern uint64_t sev_get_me_mask(void);
extern SevInfo *sev_get_info(void);
@@ -40,49 +36,4 @@ extern uint32_t sev_get_reduced_phys_bits(void);
extern char *sev_get_launch_measurement(void);
extern SevCapability *sev_get_capabilities(void);
-typedef struct QSevGuestInfo QSevGuestInfo;
-typedef struct QSevGuestInfoClass QSevGuestInfoClass;
-
-/**
- * QSevGuestInfo:
- *
- * The QSevGuestInfo object is used for creating a SEV guest.
- *
- * # $QEMU \
- * -object sev-guest,id=sev0 \
- * -machine ...,memory-encryption=sev0
- */
-struct QSevGuestInfo {
- Object parent_obj;
-
- char *sev_device;
- uint32_t policy;
- uint32_t handle;
- char *dh_cert_file;
- char *session_file;
- uint32_t cbitpos;
- uint32_t reduced_phys_bits;
-};
-
-struct QSevGuestInfoClass {
- ObjectClass parent_class;
-};
-
-struct SEVState {
- QSevGuestInfo *sev_info;
- uint8_t api_major;
- uint8_t api_minor;
- uint8_t build_id;
- uint32_t policy;
- uint64_t me_mask;
- uint32_t cbitpos;
- uint32_t reduced_phys_bits;
- uint32_t handle;
- int sev_fd;
- SevState state;
- gchar *measurement;
-};
-
-typedef struct SEVState SEVState;
-
#endif
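
Across sev.c and sev_i386.h the patch collapses the old QSevGuestInfo/SEVState split into the single SevGuestState declared at the top of sev.c: configuration and runtime fields live in one object, so code such as sev_launch_start() reads plain struct members instead of round-tripping through object_property_get_int(). A sketch of the simplified pattern (stub types, QOM plumbing elided; illustrative only):

#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t handle;   /* runtime state */
    uint32_t policy;   /* configuration */
} SevGuestStateStub;

struct launch_start_stub {
    uint32_t handle;
    uint32_t policy;
};

int main(void)
{
    SevGuestStateStub sev = { .handle = 0, .policy = 0x1 };
    struct launch_start_stub start = {
        .handle = sev.handle,  /* was: object_property_get_int(..., "handle") */
        .policy = sev.policy,  /* was: object_property_get_int(..., "policy") */
    };
    printf("policy=0x%x handle=%u\n", start.policy, start.handle);
    return 0;
}
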
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 1988b436cb..e7d382ac10 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1202,6 +1202,7 @@ PowerPCCPUClass *ppc_cpu_class_by_pvr(uint32_t pvr);
PowerPCCPUClass *ppc_cpu_class_by_pvr_mask(uint32_t pvr);
PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc);
+#ifndef CONFIG_USER_ONLY
struct PPCVirtualHypervisorClass {
InterfaceClass parent;
void (*hypercall)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
@@ -1215,10 +1216,8 @@ struct PPCVirtualHypervisorClass {
void (*hpte_set_r)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
void (*get_pate)(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry);
target_ulong (*encode_hpt_for_kvm_pr)(PPCVirtualHypervisor *vhyp);
-#ifndef CONFIG_USER_ONLY
void (*cpu_exec_enter)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
void (*cpu_exec_exit)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
-#endif
};
#define TYPE_PPC_VIRTUAL_HYPERVISOR "ppc-virtual-hypervisor"
@@ -1230,6 +1229,7 @@ struct PPCVirtualHypervisorClass {
#define PPC_VIRTUAL_HYPERVISOR_GET_CLASS(obj) \
OBJECT_GET_CLASS(PPCVirtualHypervisorClass, (obj), \
TYPE_PPC_VIRTUAL_HYPERVISOR)
+#endif /* CONFIG_USER_ONLY */
void ppc_cpu_do_interrupt(CPUState *cpu);
bool ppc_cpu_exec_interrupt(CPUState *cpu, int int_req);
diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h
index fcaf745516..701c0c262b 100644
--- a/target/ppc/kvm_ppc.h
+++ b/target/ppc/kvm_ppc.h
@@ -280,6 +280,17 @@ static inline bool kvmppc_has_cap_spapr_vfio(void)
return false;
}
+static inline void kvmppc_read_hptes(ppc_hash_pte64_t *hptes,
+ hwaddr ptex, int n)
+{
+ abort();
+}
+
+static inline void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
+{
+ abort();
+}
+
#endif /* !CONFIG_USER_ONLY */
static inline bool kvmppc_has_cap_epr(void)
@@ -310,17 +321,6 @@ static inline int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
abort();
}
-static inline void kvmppc_read_hptes(ppc_hash_pte64_t *hptes,
- hwaddr ptex, int n)
-{
- abort();
-}
-
-static inline void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
-{
- abort();
-}
-
static inline bool kvmppc_has_cap_fixup_hcalls(void)
{
abort();
diff --git a/target/ppc/translate_init.inc.c b/target/ppc/translate_init.inc.c
index 38cb773ab4..a40888411c 100644
--- a/target/ppc/translate_init.inc.c
+++ b/target/ppc/translate_init.inc.c
@@ -10942,16 +10942,20 @@ static const TypeInfo ppc_cpu_type_info = {
.class_init = ppc_cpu_class_init,
};
+#ifndef CONFIG_USER_ONLY
static const TypeInfo ppc_vhyp_type_info = {
.name = TYPE_PPC_VIRTUAL_HYPERVISOR,
.parent = TYPE_INTERFACE,
.class_size = sizeof(PPCVirtualHypervisorClass),
};
+#endif
static void ppc_cpu_register_types(void)
{
type_register_static(&ppc_cpu_type_info);
+#ifndef CONFIG_USER_ONLY
type_register_static(&ppc_vhyp_type_info);
+#endif
}
type_init(ppc_cpu_register_types)
diff --git a/tests/docker/Makefile.include b/tests/docker/Makefile.include
index 981b7fcf2a..3e3617816e 100644
--- a/tests/docker/Makefile.include
+++ b/tests/docker/Makefile.include
@@ -55,7 +55,7 @@ docker-image-%: $(DOCKER_FILES_DIR)/%.docker
else
docker-image-%: $(DOCKER_FILES_DIR)/%.docker
$(call quiet-command,\
- $(DOCKER_SCRIPT) build qemu:$* $< \
+ $(DOCKER_SCRIPT) build -t qemu:$* -f $< \
$(if $V,,--quiet) $(if $(NOCACHE),--no-cache) \
$(if $(NOUSER),,--add-current-user) \
$(if $(EXTRA_FILES),--extra-files $(EXTRA_FILES))\
diff --git a/tests/docker/docker.py b/tests/docker/docker.py
index 5a9735db78..e630aae108 100755
--- a/tests/docker/docker.py
+++ b/tests/docker/docker.py
@@ -56,15 +56,19 @@ class EngineEnum(enum.IntEnum):
USE_ENGINE = EngineEnum.AUTO
+def _bytes_checksum(bytes):
+ """Calculate a digest string unique to the text content"""
+ return hashlib.sha1(bytes).hexdigest()
+
def _text_checksum(text):
"""Calculate a digest string unique to the text content"""
- return hashlib.sha1(text.encode('utf-8')).hexdigest()
+ return _bytes_checksum(text.encode('utf-8'))
def _read_dockerfile(path):
return open(path, 'rt', encoding='utf-8').read()
def _file_checksum(filename):
- return _text_checksum(_read_dockerfile(filename))
+ return _bytes_checksum(open(filename, 'rb').read())
def _guess_engine_command():
@@ -392,16 +396,16 @@ class BuildCommand(SubCommand):
help="""Specify a binary that will be copied to the
container together with all its dependent
libraries""")
- parser.add_argument("--extra-files", "-f", nargs='*',
+ parser.add_argument("--extra-files", nargs='*',
help="""Specify files that will be copied in the
Docker image, fulfilling the ADD directive from the
Dockerfile""")
parser.add_argument("--add-current-user", "-u", dest="user",
action="store_true",
help="Add the current user to image's passwd")
- parser.add_argument("tag",
+ parser.add_argument("-t", dest="tag",
help="Image Tag")
- parser.add_argument("dockerfile",
+ parser.add_argument("-f", dest="dockerfile",
help="Dockerfile name")
def run(self, args, argv):
diff --git a/tests/qtest/machine-none-test.c b/tests/qtest/machine-none-test.c
index 10d8ec26a9..8b7abea8af 100644
--- a/tests/qtest/machine-none-test.c
+++ b/tests/qtest/machine-none-test.c
@@ -33,8 +33,8 @@ static struct arch2cpu cpus_map[] = {
{ "cris", "crisv32" },
{ "lm32", "lm32-full" },
{ "m68k", "m5206" },
- /* FIXME: { "microblaze", "any" }, doesn't work with -M none -cpu any */
- /* FIXME: { "microblazeel", "any" }, doesn't work with -M none -cpu any */
+ { "microblaze", "any" },
+ { "microblazeel", "any" },
{ "mips", "4Kc" },
{ "mipsel", "I7200" },
{ "mips64", "20Kc" },
@@ -79,10 +79,8 @@ static void test_machine_cpu_cli(void)
QTestState *qts;
if (!cpu_model) {
- if (!(!strcmp(arch, "microblaze") || !strcmp(arch, "microblazeel"))) {
- fprintf(stderr, "WARNING: cpu name for target '%s' isn't defined,"
- " add it to cpus_map\n", arch);
- }
+ fprintf(stderr, "WARNING: cpu name for target '%s' isn't defined,"
+ " add it to cpus_map\n", arch);
return; /* TODO: die here to force all targets to have a test */
}
qts = qtest_initf("-machine none -cpu '%s'", cpu_model);
diff --git a/tests/tcg/i386/Makefile.target b/tests/tcg/i386/Makefile.target
index 43ee2e181e..53efec0668 100644
--- a/tests/tcg/i386/Makefile.target
+++ b/tests/tcg/i386/Makefile.target
@@ -10,6 +10,9 @@ ALL_X86_TESTS=$(I386_SRCS:.c=)
SKIP_I386_TESTS=test-i386-ssse3
X86_64_TESTS:=$(filter test-i386-ssse3, $(ALL_X86_TESTS))
+test-i386-pcmpistri: CFLAGS += -msse4.2
+run-test-i386-pcmpistri: QEMU_OPTS += -cpu max
+
#
# hello-i386 is a barebones app
#
diff --git a/tests/tcg/i386/test-i386-fbstp.c b/tests/tcg/i386/test-i386-fbstp.c
new file mode 100644
index 0000000000..73bf56b9dc
--- /dev/null
+++ b/tests/tcg/i386/test-i386-fbstp.c
@@ -0,0 +1,140 @@
+/* Test fbstp instruction. */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+union u {
+ struct { uint64_t sig; uint16_t sign_exp; } s;
+ long double ld;
+};
+
+volatile union u ld_invalid_1 = { .s = { 1, 1234 } };
+volatile union u ld_invalid_2 = { .s = { 0, 1234 } };
+volatile union u ld_invalid_3 = { .s = { 0, 0x7fff } };
+volatile union u ld_invalid_4 = { .s = { (UINT64_C(1) << 63) - 1, 0x7fff } };
+
+int main(void)
+{
+ int ret = 0;
+ unsigned char out[10];
+ memset(out, 0xfe, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (-0.0L) : "st");
+ out[9] &= 0x80;
+ if (memcmp(out, "\0\0\0\0\0\0\0\0\0\x80", sizeof out) != 0) {
+ printf("FAIL: fbstp -0\n");
+ ret = 1;
+ }
+ memset(out, 0x12, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (-0.1L) : "st");
+ out[9] &= 0x80;
+ if (memcmp(out, "\0\0\0\0\0\0\0\0\0\x80", sizeof out) != 0) {
+ printf("FAIL: fbstp -0.1\n");
+ ret = 1;
+ }
+ memset(out, 0x1f, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (-987654321987654321.0L) :
+ "st");
+ out[9] &= 0x80;
+ if (memcmp(out, "\x21\x43\x65\x87\x19\x32\x54\x76\x98\x80",
+ sizeof out) != 0) {
+ printf("FAIL: fbstp -987654321987654321\n");
+ ret = 1;
+ }
+ memset(out, 0x12, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (999999999999999999.5L) :
+ "st");
+ if (memcmp(out, "\0\0\0\0\0\0\0\xc0\xff\xff", sizeof out) != 0) {
+ printf("FAIL: fbstp 999999999999999999.5\n");
+ ret = 1;
+ }
+ memset(out, 0x12, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (1000000000000000000.0L) :
+ "st");
+ if (memcmp(out, "\0\0\0\0\0\0\0\xc0\xff\xff", sizeof out) != 0) {
+ printf("FAIL: fbstp 1000000000000000000\n");
+ ret = 1;
+ }
+ memset(out, 0x12, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (1e30L) : "st");
+ if (memcmp(out, "\0\0\0\0\0\0\0\xc0\xff\xff", sizeof out) != 0) {
+ printf("FAIL: fbstp 1e30\n");
+ ret = 1;
+ }
+ memset(out, 0x12, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (-999999999999999999.5L) :
+ "st");
+ if (memcmp(out, "\0\0\0\0\0\0\0\xc0\xff\xff", sizeof out) != 0) {
+ printf("FAIL: fbstp -999999999999999999.5\n");
+ ret = 1;
+ }
+ memset(out, 0x12, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (-1000000000000000000.0L) :
+ "st");
+ if (memcmp(out, "\0\0\0\0\0\0\0\xc0\xff\xff", sizeof out) != 0) {
+ printf("FAIL: fbstp -1000000000000000000\n");
+ ret = 1;
+ }
+ memset(out, 0x12, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (-1e30L) : "st");
+ if (memcmp(out, "\0\0\0\0\0\0\0\xc0\xff\xff", sizeof out) != 0) {
+ printf("FAIL: fbstp -1e30\n");
+ ret = 1;
+ }
+ memset(out, 0x12, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (__builtin_infl()) : "st");
+ if (memcmp(out, "\0\0\0\0\0\0\0\xc0\xff\xff", sizeof out) != 0) {
+ printf("FAIL: fbstp inf\n");
+ ret = 1;
+ }
+ memset(out, 0x12, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (-__builtin_infl()) :
+ "st");
+ if (memcmp(out, "\0\0\0\0\0\0\0\xc0\xff\xff", sizeof out) != 0) {
+ printf("FAIL: fbstp -inf\n");
+ ret = 1;
+ }
+ memset(out, 0x12, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (__builtin_nanl("")) :
+ "st");
+ if (memcmp(out, "\0\0\0\0\0\0\0\xc0\xff\xff", sizeof out) != 0) {
+ printf("FAIL: fbstp nan\n");
+ ret = 1;
+ }
+ memset(out, 0x12, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (-__builtin_nanl("")) :
+ "st");
+ if (memcmp(out, "\0\0\0\0\0\0\0\xc0\xff\xff", sizeof out) != 0) {
+ printf("FAIL: fbstp -nan\n");
+ ret = 1;
+ }
+ memset(out, 0x12, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (ld_invalid_1.ld) :
+ "st");
+ if (memcmp(out, "\0\0\0\0\0\0\0\xc0\xff\xff", sizeof out) != 0) {
+ printf("FAIL: fbstp invalid 1\n");
+ ret = 1;
+ }
+ memset(out, 0x12, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (ld_invalid_2.ld) :
+ "st");
+ if (memcmp(out, "\0\0\0\0\0\0\0\xc0\xff\xff", sizeof out) != 0) {
+ printf("FAIL: fbstp invalid 2\n");
+ ret = 1;
+ }
+ memset(out, 0x12, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (ld_invalid_3.ld) :
+ "st");
+ if (memcmp(out, "\0\0\0\0\0\0\0\xc0\xff\xff", sizeof out) != 0) {
+ printf("FAIL: fbstp invalid 3\n");
+ ret = 1;
+ }
+ memset(out, 0x12, sizeof out);
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (ld_invalid_4.ld) :
+ "st");
+ if (memcmp(out, "\0\0\0\0\0\0\0\xc0\xff\xff", sizeof out) != 0) {
+ printf("FAIL: fbstp invalid 4\n");
+ ret = 1;
+ }
+ return ret;
+}
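
The expected byte strings in this test encode the x87 packed-BCD format that fbstp stores: eighteen decimal digits, two per byte, least-significant pair first, with the sign in bit 7 of the tenth byte; inputs that cannot be represented (too large, infinities, NaNs, invalid encodings) store the packed-BCD indefinite, whose top bytes are 0xff 0xff 0xc0. A small reference encoder for the representable case (illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Encode |mag| (up to 18 digits) as x87 packed BCD plus a sign byte. */
static void bcd_encode(uint64_t mag, int negative, unsigned char out[10])
{
    memset(out, 0, 10);
    for (int i = 0; i < 9; i++) {
        out[i] = (mag % 10) | ((mag / 10 % 10) << 4);
        mag /= 100;
    }
    out[9] = negative ? 0x80 : 0x00;
}

int main(void)
{
    unsigned char buf[10];
    bcd_encode(987654321987654321ULL, 1, buf);
    for (int i = 0; i < 10; i++) {
        printf("%02x ", buf[i]);  /* 21 43 65 87 19 32 54 76 98 80 */
    }
    printf("\n");                 /* matches the fbstp -987654321987654321 case */
    return 0;
}
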
diff --git a/tests/tcg/i386/test-i386-fisttp.c b/tests/tcg/i386/test-i386-fisttp.c
new file mode 100644
index 0000000000..16af59a774
--- /dev/null
+++ b/tests/tcg/i386/test-i386-fisttp.c
@@ -0,0 +1,100 @@
+/* Test fisttpl and fisttpll instructions. */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+union u {
+ struct { uint64_t sig; uint16_t sign_exp; } s;
+ long double ld;
+};
+
+volatile union u ld_invalid_1 = { .s = { 1, 1234 } };
+
+int main(void)
+{
+ int ret = 0;
+ int32_t res_32;
+ int64_t res_64;
+ __asm__ volatile ("fisttpl %0" : "=m" (res_32) : "t" (0x1p100L) : "st");
+ if (res_32 != INT32_MIN) {
+ printf("FAIL: fisttpl 0x1p100\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fisttpl %0" : "=m" (res_32) : "t" (-0x1p100L) : "st");
+ if (res_32 != INT32_MIN) {
+ printf("FAIL: fisttpl -0x1p100\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fisttpl %0" : "=m" (res_32) : "t" (__builtin_infl()) :
+ "st");
+ if (res_32 != INT32_MIN) {
+ printf("FAIL: fisttpl inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fisttpl %0" : "=m" (res_32) : "t" (-__builtin_infl()) :
+ "st");
+ if (res_32 != INT32_MIN) {
+ printf("FAIL: fisttpl -inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fisttpl %0" : "=m" (res_32) : "t" (__builtin_nanl("")) :
+ "st");
+ if (res_32 != INT32_MIN) {
+ printf("FAIL: fisttpl nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fisttpl %0" : "=m" (res_32) :
+ "t" (-__builtin_nanl("")) : "st");
+ if (res_32 != INT32_MIN) {
+ printf("FAIL: fisttpl -nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fisttpl %0" : "=m" (res_32) : "t" (ld_invalid_1.ld) :
+ "st");
+ if (res_32 != INT32_MIN) {
+ printf("FAIL: fisttpl invalid\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fisttpll %0" : "=m" (res_64) : "t" (0x1p100L) : "st");
+ if (res_64 != INT64_MIN) {
+ printf("FAIL: fisttpll 0x1p100\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fisttpll %0" : "=m" (res_64) : "t" (-0x1p100L) : "st");
+ if (res_64 != INT64_MIN) {
+ printf("FAIL: fisttpll -0x1p100\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fisttpll %0" : "=m" (res_64) : "t" (__builtin_infl()) :
+ "st");
+ if (res_64 != INT64_MIN) {
+ printf("FAIL: fisttpll inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fisttpll %0" : "=m" (res_64) : "t" (-__builtin_infl()) :
+ "st");
+ if (res_64 != INT64_MIN) {
+ printf("FAIL: fisttpll -inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fisttpll %0" : "=m" (res_64) :
+ "t" (__builtin_nanl("")) : "st");
+ if (res_64 != INT64_MIN) {
+ printf("FAIL: fisttpll nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fisttpll %0" : "=m" (res_64) :
+ "t" (-__builtin_nanl("")) : "st");
+ if (res_64 != INT64_MIN) {
+ printf("FAIL: fisttpll -nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fisttpll %0" : "=m" (res_64) : "t" (ld_invalid_1.ld) :
+ "st");
+ if (res_64 != INT64_MIN) {
+ printf("FAIL: fisttpll invalid\n");
+ ret = 1;
+ }
+ return ret;
+}
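
fisttpl/fisttpll truncate toward zero regardless of the x87 rounding control, and every input in this test is unrepresentable in the destination, so the expected value is always the "integer indefinite": the minimum signed integer of the destination width. A reference model of the 32-bit case (assumed simplification, nothing QEMU-specific; link with -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

static int32_t fisttpl_model(long double x)
{
    /* NaNs and out-of-range values yield the integer indefinite. */
    if (isnan(x) || x >= 0x1p31L || x < -0x1p31L) {
        return INT32_MIN;
    }
    return (int32_t)truncl(x);  /* truncation, independent of rounding mode */
}

int main(void)
{
    printf("%d\n", fisttpl_model(2.75L));    /* 2 */
    printf("%d\n", fisttpl_model(-2.75L));   /* -2 */
    printf("%d\n", fisttpl_model(0x1p100L)); /* -2147483648 */
    return 0;
}
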
diff --git a/tests/tcg/i386/test-i386-fldcst.c b/tests/tcg/i386/test-i386-fldcst.c
new file mode 100644
index 0000000000..e635432ccf
--- /dev/null
+++ b/tests/tcg/i386/test-i386-fldcst.c
@@ -0,0 +1,199 @@
+/* Test instructions loading floating-point constants. */
+
+#include <stdint.h>
+#include <stdio.h>
+
+volatile long double ld_res;
+
+int main(void)
+{
+ short cw;
+ int ret = 0;
+
+ /* Round to nearest. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x000;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldl2t" : "=t" (ld_res));
+ if (ld_res != 0x3.5269e12f346e2bf8p+0L) {
+ printf("FAIL: fldl2t N\n");
+ ret = 1;
+ }
+ /* Round downward. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x400;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldl2t" : "=t" (ld_res));
+ if (ld_res != 0x3.5269e12f346e2bf8p+0L) {
+ printf("FAIL: fldl2t D\n");
+ ret = 1;
+ }
+ /* Round toward zero. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0xc00;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldl2t" : "=t" (ld_res));
+ if (ld_res != 0x3.5269e12f346e2bf8p+0L) {
+ printf("FAIL: fldl2t Z\n");
+ ret = 1;
+ }
+ /* Round upward. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x800;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldl2t" : "=t" (ld_res));
+ if (ld_res != 0x3.5269e12f346e2bfcp+0L) {
+ printf("FAIL: fldl2t U\n");
+ ret = 1;
+ }
+
+ /* Round to nearest. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x000;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldl2e" : "=t" (ld_res));
+ if (ld_res != 0x1.71547652b82fe178p+0L) {
+ printf("FAIL: fldl2e N\n");
+ ret = 1;
+ }
+ /* Round downward. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x400;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldl2e" : "=t" (ld_res));
+ if (ld_res != 0x1.71547652b82fe176p+0L) {
+ printf("FAIL: fldl2e D\n");
+ ret = 1;
+ }
+ /* Round toward zero. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0xc00;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldl2e" : "=t" (ld_res));
+ if (ld_res != 0x1.71547652b82fe176p+0L) {
+ printf("FAIL: fldl2e Z\n");
+ ret = 1;
+ }
+ /* Round upward. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x800;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldl2e" : "=t" (ld_res));
+ if (ld_res != 0x1.71547652b82fe178p+0L) {
+ printf("FAIL: fldl2e U\n");
+ ret = 1;
+ }
+
+ /* Round to nearest. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x000;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldpi" : "=t" (ld_res));
+ if (ld_res != 0x3.243f6a8885a308d4p+0L) {
+ printf("FAIL: fldpi N\n");
+ ret = 1;
+ }
+ /* Round downward. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x400;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldpi" : "=t" (ld_res));
+ if (ld_res != 0x3.243f6a8885a308dp+0L) {
+ printf("FAIL: fldpi D\n");
+ ret = 1;
+ }
+ /* Round toward zero. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0xc00;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldpi" : "=t" (ld_res));
+ if (ld_res != 0x3.243f6a8885a308dp+0L) {
+ printf("FAIL: fldpi Z\n");
+ ret = 1;
+ }
+ /* Round upward. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x800;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldpi" : "=t" (ld_res));
+ if (ld_res != 0x3.243f6a8885a308d4p+0L) {
+ printf("FAIL: fldpi U\n");
+ ret = 1;
+ }
+
+ /* Round to nearest. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x000;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldlg2" : "=t" (ld_res));
+ if (ld_res != 0x4.d104d427de7fbcc8p-4L) {
+ printf("FAIL: fldlg2 N\n");
+ ret = 1;
+ }
+ /* Round downward. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x400;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldlg2" : "=t" (ld_res));
+ if (ld_res != 0x4.d104d427de7fbccp-4L) {
+ printf("FAIL: fldlg2 D\n");
+ ret = 1;
+ }
+ /* Round toward zero. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0xc00;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldlg2" : "=t" (ld_res));
+ if (ld_res != 0x4.d104d427de7fbccp-4L) {
+ printf("FAIL: fldlg2 Z\n");
+ ret = 1;
+ }
+ /* Round upward. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x800;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldlg2" : "=t" (ld_res));
+ if (ld_res != 0x4.d104d427de7fbcc8p-4L) {
+ printf("FAIL: fldlg2 U\n");
+ ret = 1;
+ }
+
+ /* Round to nearest. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x000;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldln2" : "=t" (ld_res));
+ if (ld_res != 0xb.17217f7d1cf79acp-4L) {
+ printf("FAIL: fldln2 N\n");
+ ret = 1;
+ }
+ /* Round downward. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x400;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldln2" : "=t" (ld_res));
+ if (ld_res != 0xb.17217f7d1cf79abp-4L) {
+ printf("FAIL: fldln2 D\n");
+ ret = 1;
+ }
+ /* Round toward zero. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0xc00;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldln2" : "=t" (ld_res));
+ if (ld_res != 0xb.17217f7d1cf79abp-4L) {
+ printf("FAIL: fldln2 Z\n");
+ ret = 1;
+ }
+ /* Round upward. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x800;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fldln2" : "=t" (ld_res));
+ if (ld_res != 0xb.17217f7d1cf79acp-4L) {
+ printf("FAIL: fldln2 U\n");
+ ret = 1;
+ }
+
+ return ret;
+}
diff --git a/tests/tcg/i386/test-i386-fp-exceptions.c b/tests/tcg/i386/test-i386-fp-exceptions.c
new file mode 100644
index 0000000000..dfb7117c17
--- /dev/null
+++ b/tests/tcg/i386/test-i386-fp-exceptions.c
@@ -0,0 +1,831 @@
+/* Test floating-point exceptions. */
+
+#include <float.h>
+#include <stdint.h>
+#include <stdio.h>
+
+union u {
+ struct { uint64_t sig; uint16_t sign_exp; } s;
+ long double ld;
+};
+
+volatile float f_res;
+volatile double d_res;
+volatile long double ld_res;
+volatile long double ld_res2;
+
+volatile union u ld_invalid_1 = { .s = { 1, 1234 } };
+volatile float f_snan = __builtin_nansf("");
+volatile double d_snan = __builtin_nans("");
+volatile long double ld_third = 1.0L / 3.0L;
+volatile long double ld_snan = __builtin_nansl("");
+volatile long double ld_nan = __builtin_nanl("");
+volatile long double ld_inf = __builtin_infl();
+volatile long double ld_ninf = -__builtin_infl();
+volatile long double ld_one = 1.0L;
+volatile long double ld_zero = 0.0L;
+volatile long double ld_nzero = -0.0L;
+volatile long double ld_min = LDBL_MIN;
+volatile long double ld_max = LDBL_MAX;
+volatile long double ld_nmax = -LDBL_MAX;
+
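+/*
+ * x87 status-word exception flags: invalid (IE), zero divide (ZE),
+ * overflow (OE), underflow (UE) and precision, i.e. inexact (PE).
+ */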
+#define IE (1 << 0)
+#define ZE (1 << 2)
+#define OE (1 << 3)
+#define UE (1 << 4)
+#define PE (1 << 5)
+#define EXC (IE | ZE | OE | UE | PE)
+
+int main(void)
+{
+ short sw;
+ unsigned char out[10];
+ int ret = 0;
+ int16_t res_16;
+ int32_t res_32;
+ int64_t res_64;
+
+ __asm__ volatile ("fnclex");
+ ld_res = f_snan;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: widen float snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = d_snan;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: widen double snan\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ f_res = ld_min;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != (UE | PE)) {
+ printf("FAIL: narrow float underflow\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ d_res = ld_min;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != (UE | PE)) {
+ printf("FAIL: narrow double underflow\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ f_res = ld_max;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != (OE | PE)) {
+ printf("FAIL: narrow float overflow\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ d_res = ld_max;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != (OE | PE)) {
+ printf("FAIL: narrow double overflow\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ f_res = ld_third;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != PE) {
+ printf("FAIL: narrow float inexact\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ d_res = ld_third;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != PE) {
+ printf("FAIL: narrow double inexact\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ f_res = ld_snan;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: narrow float snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ d_res = ld_snan;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: narrow double snan\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ f_res = ld_invalid_1.ld;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: narrow float invalid\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ d_res = ld_invalid_1.ld;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: narrow double invalid\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("frndint" : "=t" (ld_res) : "0" (ld_min));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != PE) {
+ printf("FAIL: frndint min\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("frndint" : "=t" (ld_res) : "0" (ld_snan));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: frndint snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("frndint" : "=t" (ld_res) : "0" (ld_invalid_1.ld));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: frndint invalid\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fcom" : : "t" (ld_nan), "u" (ld_zero));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fcom nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fucom" : : "t" (ld_nan), "u" (ld_zero));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != 0) {
+ printf("FAIL: fucom nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fucom" : : "t" (ld_snan), "u" (ld_zero));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fucom snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fucom" : : "t" (1.0L), "u" (ld_invalid_1.ld));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fucom invalid\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ ld_res = ld_max + ld_max;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != (OE | PE)) {
+ printf("FAIL: add overflow\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_max + ld_min;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != PE) {
+ printf("FAIL: add inexact\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_inf + ld_ninf;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: add inf -inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_snan + ld_third;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: add snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_third + ld_invalid_1.ld;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: add invalid\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ ld_res = ld_max - ld_nmax;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != (OE | PE)) {
+ printf("FAIL: sub overflow\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_max - ld_min;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != PE) {
+ printf("FAIL: sub inexact\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_inf - ld_inf;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: sub inf inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_snan - ld_third;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: sub snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_third - ld_invalid_1.ld;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: sub invalid\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ ld_res = ld_max * ld_max;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != (OE | PE)) {
+ printf("FAIL: mul overflow\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_third * ld_third;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != PE) {
+ printf("FAIL: mul inexact\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_min * ld_min;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != (UE | PE)) {
+ printf("FAIL: mul underflow\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_inf * ld_zero;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: mul inf 0\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_snan * ld_third;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: mul snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_third * ld_invalid_1.ld;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: mul invalid\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ ld_res = ld_max / ld_min;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != (OE | PE)) {
+ printf("FAIL: div overflow\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_one / ld_third;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != PE) {
+ printf("FAIL: div inexact\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_min / ld_max;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != (UE | PE)) {
+ printf("FAIL: div underflow\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_one / ld_zero;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != ZE) {
+ printf("FAIL: div 1 0\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_inf / ld_zero;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != 0) {
+ printf("FAIL: div inf 0\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_nan / ld_zero;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != 0) {
+ printf("FAIL: div nan 0\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_zero / ld_zero;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: div 0 0\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_inf / ld_inf;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: div inf inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_snan / ld_third;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: div snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ ld_res = ld_third / ld_invalid_1.ld;
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: div invalid\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fsqrt" : "=t" (ld_res) : "0" (ld_max));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != PE) {
+ printf("FAIL: fsqrt inexact\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fsqrt" : "=t" (ld_res) : "0" (ld_nmax));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fsqrt -max\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fsqrt" : "=t" (ld_res) : "0" (ld_ninf));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fsqrt -inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fsqrt" : "=t" (ld_res) : "0" (ld_snan));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fsqrt snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fsqrt" : "=t" (ld_res) : "0" (ld_invalid_1.ld));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fsqrt invalid\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fsqrt" : "=t" (ld_res) : "0" (ld_nzero));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != 0) {
+ printf("FAIL: fsqrt -0\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fsqrt" : "=t" (ld_res) : "0" (-__builtin_nanl("")));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != 0) {
+ printf("FAIL: fsqrt -nan\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fistp %0" : "=m" (res_16) : "t" (1.5L) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != PE) {
+ printf("FAIL: fistp inexact\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fistp %0" : "=m" (res_16) : "t" (32767.5L) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fistp 32767.5\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fistp %0" : "=m" (res_16) : "t" (-32768.51L) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fistp -32768.51\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fistp %0" : "=m" (res_16) : "t" (ld_nan) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fistp nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fistp %0" : "=m" (res_16) : "t" (ld_invalid_1.ld) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fistp invalid\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fistpl %0" : "=m" (res_32) : "t" (1.5L) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != PE) {
+ printf("FAIL: fistpl inexact\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fistpl %0" : "=m" (res_32) : "t" (2147483647.5L) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fistpl 2147483647.5\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fistpl %0" : "=m" (res_32) : "t" (-2147483648.51L) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fistpl -2147483648.51\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fistpl %0" : "=m" (res_32) : "t" (ld_nan) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fistpl nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fistpl %0" : "=m" (res_32) : "t" (ld_invalid_1.ld) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fistpl invalid\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fistpll %0" : "=m" (res_64) : "t" (1.5L) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != PE) {
+ printf("FAIL: fistpll inexact\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fistpll %0" : "=m" (res_64) : "t" (0x1p63) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fistpll 0x1p63\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fistpll %0" : "=m" (res_64) : "t" (-0x1.1p63L) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fistpll -0x1.1p63\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fistpll %0" : "=m" (res_64) : "t" (ld_nan) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fistpll nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fistpll %0" : "=m" (res_64) : "t" (ld_invalid_1.ld) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fistpll invalid\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttp %0" : "=m" (res_16) : "t" (1.5L) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != PE) {
+ printf("FAIL: fisttp inexact\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttp %0" : "=m" (res_16) : "t" (32768.0L) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fisttp 32768\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttp %0" : "=m" (res_16) : "t" (32768.5L) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fisttp 32768.5\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttp %0" : "=m" (res_16) : "t" (-32769.0L) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fisttp -32769\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttp %0" : "=m" (res_16) : "t" (-32769.5L) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fisttp -32769.5\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttp %0" : "=m" (res_16) : "t" (ld_nan) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fisttp nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttp %0" : "=m" (res_16) : "t" (ld_invalid_1.ld) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fisttp invalid\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttpl %0" : "=m" (res_32) : "t" (1.5L) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != PE) {
+ printf("FAIL: fisttpl inexact\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttpl %0" : "=m" (res_32) : "t" (2147483648.0L) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fisttpl 2147483648\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttpl %0" : "=m" (res_32) : "t" (-2147483649.0L) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fisttpl -2147483649\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttpl %0" : "=m" (res_32) : "t" (ld_nan) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fisttpl nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttpl %0" : "=m" (res_32) : "t" (ld_invalid_1.ld) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fisttpl invalid\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttpll %0" : "=m" (res_64) : "t" (1.5L) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != PE) {
+ printf("FAIL: fisttpll inexact\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttpll %0" : "=m" (res_64) : "t" (0x1p63) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fisttpll 0x1p63\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttpll %0" : "=m" (res_64) : "t" (-0x1.1p63L) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fisttpll -0x1.1p63\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttpll %0" : "=m" (res_64) : "t" (ld_nan) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fisttpll nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fisttpll %0" : "=m" (res_64) : "t" (ld_invalid_1.ld) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fisttpll invalid\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fxtract" : "=t" (ld_res), "=u" (ld_res2) :
+ "0" (ld_zero));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != ZE) {
+ printf("FAIL: fxtract 0\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fxtract" : "=t" (ld_res), "=u" (ld_res2) :
+ "0" (ld_nzero));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != ZE) {
+ printf("FAIL: fxtract -0\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fxtract" : "=t" (ld_res), "=u" (ld_res2) :
+ "0" (ld_inf));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != 0) {
+ printf("FAIL: fxtract inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fxtract" : "=t" (ld_res), "=u" (ld_res2) :
+ "0" (ld_nan));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != 0) {
+ printf("FAIL: fxtract nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fxtract" : "=t" (ld_res), "=u" (ld_res2) :
+ "0" (ld_snan));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fxtract snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fxtract" : "=t" (ld_res), "=u" (ld_res2) :
+ "0" (ld_invalid_1.ld));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fxtract invalid\n");
+ ret = 1;
+ }
+
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fscale" : "=t" (ld_res) : "0" (ld_min), "u" (ld_max));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != (OE | PE)) {
+ printf("FAIL: fscale overflow\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fscale" : "=t" (ld_res) : "0" (ld_max), "u" (ld_nmax));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != (UE | PE)) {
+ printf("FAIL: fscale underflow\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fscale" : "=t" (ld_res) : "0" (ld_zero), "u" (ld_inf));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fscale 0 inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fscale" : "=t" (ld_res) : "0" (ld_inf), "u" (ld_ninf));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fscale inf -inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fscale" : "=t" (ld_res) : "0" (ld_one), "u" (ld_snan));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fscale 1 snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fscale" : "=t" (ld_res) : "0" (ld_snan), "u" (ld_nan));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fscale snan nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fscale" : "=t" (ld_res) :
+ "0" (ld_invalid_1.ld), "u" (ld_one));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fscale invalid 1\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fscale" : "=t" (ld_res) :
+ "0" (ld_invalid_1.ld), "u" (ld_nan));
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fscale invalid nan\n");
+ ret = 1;
+ }
+
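+ /*
+  * fbstp stores an 18-digit packed BCD value, so operands that round
+  * to 10^18 or beyond, and non-finite or invalid operands, raise
+  * invalid.
+  */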
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (1.5L) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != PE) {
+ printf("FAIL: fbstp 1.5\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (999999999999999999.5L) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fbstp 999999999999999999.5\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (-1000000000000000000.0L) :
+ "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fbstp -1000000000000000000\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (ld_inf) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fbstp inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (ld_nan) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fbstp nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (ld_snan) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fbstp snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fnclex");
+ __asm__ volatile ("fbstp %0" : "=m" (out) : "t" (ld_invalid_1.ld) : "st");
+ __asm__ volatile ("fnstsw" : "=a" (sw));
+ if ((sw & EXC) != IE) {
+ printf("FAIL: fbstp invalid\n");
+ ret = 1;
+ }
+
+ return ret;
+}
diff --git a/tests/tcg/i386/test-i386-fscale.c b/tests/tcg/i386/test-i386-fscale.c
new file mode 100644
index 0000000000..d23b3cfeec
--- /dev/null
+++ b/tests/tcg/i386/test-i386-fscale.c
@@ -0,0 +1,108 @@
+/* Test fscale instruction. */
+
+#include <stdint.h>
+#include <stdio.h>
+
+union u {
+ struct { uint64_t sig; uint16_t sign_exp; } s;
+ long double ld;
+};
+
+volatile long double ld_third = 1.0L / 3.0L;
+volatile long double ld_four_thirds = 4.0L / 3.0L;
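+/*
+ * Invalid 80-bit encodings: the explicit integer bit is clear where it
+ * must be set (unnormals, pseudo-zero, pseudo-infinity, pseudo-NaN);
+ * the FPU treats all of these as invalid operands.
+ */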
+volatile union u ld_invalid_1 = { .s = { 1, 1234 } };
+volatile union u ld_invalid_2 = { .s = { 0, 1234 } };
+volatile union u ld_invalid_3 = { .s = { 0, 0x7fff } };
+volatile union u ld_invalid_4 = { .s = { (UINT64_C(1) << 63) - 1, 0x7fff } };
+
+volatile long double ld_res;
+
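+/*
+ * Classify by raw encoding: a NaN has an all-ones exponent, the
+ * integer bit set and a nonzero fraction; it is signaling when the
+ * top fraction bit is clear.
+ */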
+int isnan_ld(long double x)
+{
+ union u tmp = { .ld = x };
+ return ((tmp.s.sign_exp & 0x7fff) == 0x7fff &&
+ (tmp.s.sig >> 63) != 0 &&
+ (tmp.s.sig << 1) != 0);
+}
+
+int issignaling_ld(long double x)
+{
+ union u tmp = { .ld = x };
+ return isnan_ld(x) && (tmp.s.sig & UINT64_C(0x4000000000000000)) == 0;
+}
+
+int main(void)
+{
+ short cw;
+ int ret = 0;
+ __asm__ volatile ("fscale" : "=t" (ld_res) :
+ "0" (2.5L), "u" (__builtin_nansl("")));
+ if (!isnan_ld(ld_res) || issignaling_ld(ld_res)) {
+ printf("FAIL: fscale snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fscale" : "=t" (ld_res) :
+ "0" (2.5L), "u" (ld_invalid_1.ld));
+ if (!isnan_ld(ld_res) || issignaling_ld(ld_res)) {
+ printf("FAIL: fscale invalid 1\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fscale" : "=t" (ld_res) :
+ "0" (2.5L), "u" (ld_invalid_2.ld));
+ if (!isnan_ld(ld_res) || issignaling_ld(ld_res)) {
+ printf("FAIL: fscale invalid 2\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fscale" : "=t" (ld_res) :
+ "0" (2.5L), "u" (ld_invalid_3.ld));
+ if (!isnan_ld(ld_res) || issignaling_ld(ld_res)) {
+ printf("FAIL: fscale invalid 3\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fscale" : "=t" (ld_res) :
+ "0" (2.5L), "u" (ld_invalid_4.ld));
+ if (!isnan_ld(ld_res) || issignaling_ld(ld_res)) {
+ printf("FAIL: fscale invalid 4\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fscale" : "=t" (ld_res) :
+ "0" (0.0L), "u" (__builtin_infl()));
+ if (!isnan_ld(ld_res) || issignaling_ld(ld_res)) {
+ printf("FAIL: fscale 0 up inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fscale" : "=t" (ld_res) :
+ "0" (__builtin_infl()), "u" (-__builtin_infl()));
+ if (!isnan_ld(ld_res) || issignaling_ld(ld_res)) {
+ printf("FAIL: fscale inf down inf\n");
+ ret = 1;
+ }
+ /* Set round-downward. */
+ __asm__ volatile ("fnstcw %0" : "=m" (cw));
+ cw = (cw & ~0xc00) | 0x400;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fscale" : "=t" (ld_res) :
+ "0" (1.0L), "u" (__builtin_infl()));
+ if (ld_res != __builtin_infl()) {
+ printf("FAIL: fscale finite up inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fscale" : "=t" (ld_res) :
+ "0" (-1.0L), "u" (-__builtin_infl()));
+ if (ld_res != -0.0L || __builtin_copysignl(1.0L, ld_res) != -1.0L) {
+ printf("FAIL: fscale finite down inf\n");
+ ret = 1;
+ }
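+ /*
+  * fscale is not subject to precision control, and scaling by a power
+  * of 2 is exact, so the result must keep its extended-precision
+  * significand.
+  */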
+ /* Set round-to-nearest with single-precision rounding. */
+ cw = cw & ~0xf00;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ __asm__ volatile ("fscale" : "=t" (ld_res) :
+ "0" (ld_third), "u" (2.0L));
+ cw = cw | 0x300;
+ __asm__ volatile ("fldcw %0" : : "m" (cw));
+ if (ld_res != ld_four_thirds) {
+ printf("FAIL: fscale single-precision\n");
+ ret = 1;
+ }
+ return ret;
+}
diff --git a/tests/tcg/i386/test-i386-fxam.c b/tests/tcg/i386/test-i386-fxam.c
new file mode 100644
index 0000000000..ddd76ca42d
--- /dev/null
+++ b/tests/tcg/i386/test-i386-fxam.c
@@ -0,0 +1,143 @@
+/* Test fxam instruction. */
+
+#include <stdint.h>
+#include <stdio.h>
+
+union u {
+ struct { uint64_t sig; uint16_t sign_exp; } s;
+ long double ld;
+};
+
+volatile union u ld_pseudo_m16382 = { .s = { UINT64_C(1) << 63, 0 } };
+volatile union u ld_pseudo_nm16382 = { .s = { UINT64_C(1) << 63, 0x8000 } };
+volatile union u ld_invalid_1 = { .s = { 1, 1234 } };
+volatile union u ld_invalid_2 = { .s = { 0, 1234 } };
+volatile union u ld_invalid_3 = { .s = { 0, 0x7fff } };
+volatile union u ld_invalid_4 = { .s = { (UINT64_C(1) << 63) - 1, 0x7fff } };
+volatile union u ld_invalid_n1 = { .s = { 1, 0x8123 } };
+volatile union u ld_invalid_n2 = { .s = { 0, 0x8123 } };
+volatile union u ld_invalid_n3 = { .s = { 0, 0xffff } };
+volatile union u ld_invalid_n4 = { .s = { (UINT64_C(1) << 63) - 1, 0xffff } };
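+/*
+ * Pseudo-denormals (zero exponent with the explicit integer bit set)
+ * are still classified as denormals; the ld_invalid_* encodings are
+ * unsupported operands.
+ */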
+
+#define C0 (1 << 8)
+#define C1 (1 << 9)
+#define C2 (1 << 10)
+#define C3 (1 << 14)
+#define FLAGS (C0 | C1 | C2 | C3)
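+/*
+ * fxam encodes the class in C3:C2:C0 (000 unsupported, 001 NaN,
+ * 010 normal, 011 infinity, 100 zero, 101 empty, 110 denormal) and
+ * the sign in C1.
+ */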
+
+int main(void)
+{
+ short sw;
+ int ret = 0;
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (0.0L));
+ if ((sw & FLAGS) != C3) {
+ printf("FAIL: +0\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (-0.0L));
+ if ((sw & FLAGS) != (C3 | C1)) {
+ printf("FAIL: -0\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (1.0L));
+ if ((sw & FLAGS) != C2) {
+ printf("FAIL: +normal\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (-1.0L));
+ if ((sw & FLAGS) != (C2 | C1)) {
+ printf("FAIL: -normal\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (__builtin_infl()));
+ if ((sw & FLAGS) != (C2 | C0)) {
+ printf("FAIL: +inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (-__builtin_infl()));
+ if ((sw & FLAGS) != (C2 | C1 | C0)) {
+ printf("FAIL: -inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (__builtin_nanl("")));
+ if ((sw & FLAGS) != C0) {
+ printf("FAIL: +nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (-__builtin_nanl("")));
+ if ((sw & FLAGS) != (C1 | C0)) {
+ printf("FAIL: -nan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (__builtin_nansl("")));
+ if ((sw & FLAGS) != C0) {
+ printf("FAIL: +snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (-__builtin_nansl("")));
+ if ((sw & FLAGS) != (C1 | C0)) {
+ printf("FAIL: -snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (0x1p-16445L));
+ if ((sw & FLAGS) != (C3 | C2)) {
+ printf("FAIL: +denormal\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (-0x1p-16445L));
+ if ((sw & FLAGS) != (C3 | C2 | C1)) {
+ printf("FAIL: -denormal\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (ld_pseudo_m16382.ld));
+ if ((sw & FLAGS) != (C3 | C2)) {
+ printf("FAIL: +pseudo-denormal\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (ld_pseudo_nm16382.ld));
+ if ((sw & FLAGS) != (C3 | C2 | C1)) {
+ printf("FAIL: -pseudo-denormal\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (ld_invalid_1.ld));
+ if ((sw & FLAGS) != 0) {
+ printf("FAIL: +invalid 1\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (ld_invalid_n1.ld));
+ if ((sw & FLAGS) != C1) {
+ printf("FAIL: -invalid 1\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (ld_invalid_2.ld));
+ if ((sw & FLAGS) != 0) {
+ printf("FAIL: +invalid 2\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (ld_invalid_n2.ld));
+ if ((sw & FLAGS) != C1) {
+ printf("FAIL: -invalid 2\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (ld_invalid_3.ld));
+ if ((sw & FLAGS) != 0) {
+ printf("FAIL: +invalid 3\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (ld_invalid_n3.ld));
+ if ((sw & FLAGS) != C1) {
+ printf("FAIL: -invalid 3\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (ld_invalid_4.ld));
+ if ((sw & FLAGS) != 0) {
+ printf("FAIL: +invalid 4\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxam\nfnstsw" : "=a" (sw) : "t" (ld_invalid_n4.ld));
+ if ((sw & FLAGS) != C1) {
+ printf("FAIL: -invalid 4\n");
+ ret = 1;
+ }
+ return ret;
+}
diff --git a/tests/tcg/i386/test-i386-fxtract.c b/tests/tcg/i386/test-i386-fxtract.c
new file mode 100644
index 0000000000..64fd93d333
--- /dev/null
+++ b/tests/tcg/i386/test-i386-fxtract.c
@@ -0,0 +1,120 @@
+/* Test fxtract instruction. */
+
+#include <stdint.h>
+#include <stdio.h>
+
+union u {
+ struct { uint64_t sig; uint16_t sign_exp; } s;
+ long double ld;
+};
+
+volatile union u ld_pseudo_m16382 = { .s = { UINT64_C(1) << 63, 0 } };
+volatile union u ld_invalid_1 = { .s = { 1, 1234 } };
+volatile union u ld_invalid_2 = { .s = { 0, 1234 } };
+volatile union u ld_invalid_3 = { .s = { 0, 0x7fff } };
+volatile union u ld_invalid_4 = { .s = { (UINT64_C(1) << 63) - 1, 0x7fff } };
+
+volatile long double ld_sig, ld_exp;
+
+int isnan_ld(long double x)
+{
+ union u tmp = { .ld = x };
+ return ((tmp.s.sign_exp & 0x7fff) == 0x7fff &&
+ (tmp.s.sig >> 63) != 0 &&
+ (tmp.s.sig << 1) != 0);
+}
+
+int issignaling_ld(long double x)
+{
+ union u tmp = { .ld = x };
+ return isnan_ld(x) && (tmp.s.sig & UINT64_C(0x4000000000000000)) == 0;
+}
+
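+/*
+ * fxtract replaces st(0) with its significand scaled to [1, 2) and
+ * pushes the unbiased exponent; zero yields -inf as the exponent and
+ * raises zero divide, while unsupported encodings raise invalid.
+ */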
+int main(void)
+{
+ int ret = 0;
+ __asm__ volatile ("fxtract" : "=t" (ld_sig), "=u" (ld_exp) : "0" (2.5L));
+ if (ld_sig != 1.25L || ld_exp != 1.0L) {
+ printf("FAIL: fxtract 2.5\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxtract" : "=t" (ld_sig), "=u" (ld_exp) : "0" (0.0L));
+ if (ld_sig != 0.0L || __builtin_copysignl(1.0L, ld_sig) != 1.0L ||
+ ld_exp != -__builtin_infl()) {
+ printf("FAIL: fxtract 0.0\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxtract" : "=t" (ld_sig), "=u" (ld_exp) : "0" (-0.0L));
+ if (ld_sig != -0.0L || __builtin_copysignl(1.0L, ld_sig) != -1.0L ||
+ ld_exp != -__builtin_infl()) {
+ printf("FAIL: fxtract -0.0\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxtract" : "=t" (ld_sig), "=u" (ld_exp) :
+ "0" (__builtin_infl()));
+ if (ld_sig != __builtin_infl() || ld_exp != __builtin_infl()) {
+ printf("FAIL: fxtract inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxtract" : "=t" (ld_sig), "=u" (ld_exp) :
+ "0" (-__builtin_infl()));
+ if (ld_sig != -__builtin_infl() || ld_exp != __builtin_infl()) {
+ printf("FAIL: fxtract -inf\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxtract" : "=t" (ld_sig), "=u" (ld_exp) :
+ "0" (__builtin_nanl("")));
+ if (!isnan_ld(ld_sig) || issignaling_ld(ld_sig) ||
+ !isnan_ld(ld_exp) || issignaling_ld(ld_exp)) {
+ printf("FAIL: fxtract qnan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxtract" : "=t" (ld_sig), "=u" (ld_exp) :
+ "0" (__builtin_nansl("")));
+ if (!isnan_ld(ld_sig) || issignaling_ld(ld_sig) ||
+ !isnan_ld(ld_exp) || issignaling_ld(ld_exp)) {
+ printf("FAIL: fxtract snan\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxtract" : "=t" (ld_sig), "=u" (ld_exp) :
+ "0" (0x1p-16445L));
+ if (ld_sig != 1.0L || ld_exp != -16445.0L) {
+ printf("FAIL: fxtract subnormal\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxtract" : "=t" (ld_sig), "=u" (ld_exp) :
+ "0" (ld_pseudo_m16382.ld));
+ if (ld_sig != 1.0L || ld_exp != -16382.0L) {
+ printf("FAIL: fxtract pseudo\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxtract" : "=t" (ld_sig), "=u" (ld_exp) :
+ "0" (ld_invalid_1.ld));
+ if (!isnan_ld(ld_sig) || issignaling_ld(ld_sig) ||
+ !isnan_ld(ld_exp) || issignaling_ld(ld_exp)) {
+ printf("FAIL: fxtract invalid 1\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxtract" : "=t" (ld_sig), "=u" (ld_exp) :
+ "0" (ld_invalid_2.ld));
+ if (!isnan_ld(ld_sig) || issignaling_ld(ld_sig) ||
+ !isnan_ld(ld_exp) || issignaling_ld(ld_exp)) {
+ printf("FAIL: fxtract invalid 2\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxtract" : "=t" (ld_sig), "=u" (ld_exp) :
+ "0" (ld_invalid_3.ld));
+ if (!isnan_ld(ld_sig) || issignaling_ld(ld_sig) ||
+ !isnan_ld(ld_exp) || issignaling_ld(ld_exp)) {
+ printf("FAIL: fxtract invalid 3\n");
+ ret = 1;
+ }
+ __asm__ volatile ("fxtract" : "=t" (ld_sig), "=u" (ld_exp) :
+ "0" (ld_invalid_4.ld));
+ if (!isnan_ld(ld_sig) || issignaling_ld(ld_sig) ||
+ !isnan_ld(ld_exp) || issignaling_ld(ld_exp)) {
+ printf("FAIL: fxtract invalid 4\n");
+ ret = 1;
+ }
+ return ret;
+}
diff --git a/tests/tcg/i386/test-i386-pcmpistri.c b/tests/tcg/i386/test-i386-pcmpistri.c
new file mode 100644
index 0000000000..1e81ae611a
--- /dev/null
+++ b/tests/tcg/i386/test-i386-pcmpistri.c
@@ -0,0 +1,33 @@
+/* Test pcmpistri instruction. */
+
+#include <nmmintrin.h>
+#include <stdio.h>
+
+union u {
+ __m128i x;
+ unsigned char uc[16];
+};
+
+union u s0 = { .uc = { 0 } };
+union u s1 = { .uc = "abcdefghijklmnop" };
+union u s2 = { .uc = "bcdefghijklmnopa" };
+union u s3 = { .uc = "bcdefghijklmnab" };
+
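+/*
+ * Immediate 0x4c selects unsigned byte elements, "equal ordered"
+ * (substring search) aggregation and the most significant matching
+ * index; ECX = 16 means no match.
+ */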
+int
+main(void)
+{
+ int ret = 0;
+ if (_mm_cmpistri(s0.x, s0.x, 0x4c) != 15) {
+ printf("FAIL: pcmpistri test 1\n");
+ ret = 1;
+ }
+ if (_mm_cmpistri(s1.x, s2.x, 0x4c) != 15) {
+ printf("FAIL: pcmpistri test 2\n");
+ ret = 1;
+ }
+ if (_mm_cmpistri(s1.x, s3.x, 0x4c) != 16) {
+ printf("FAIL: pcmpistri test 3\n");
+ ret = 1;
+ }
+ return ret;
+}
diff --git a/tests/test-io-task.c b/tests/test-io-task.c
index aa8b653bfa..c8a3813d49 100644
--- a/tests/test-io-task.c
+++ b/tests/test-io-task.c
@@ -20,6 +20,7 @@
#include "qemu/osdep.h"
+#include "qom/object.h"
#include "io/task.h"
#include "qapi/error.h"
#include "qemu/module.h"
diff --git a/util/oslib-posix.c b/util/oslib-posix.c
index 062236a1ab..916f1be224 100644
--- a/util/oslib-posix.c
+++ b/util/oslib-posix.c
@@ -48,11 +48,13 @@
#ifdef __FreeBSD__
#include <sys/sysctl.h>
#include <sys/user.h>
+#include <sys/thr.h>
#include <libutil.h>
#endif

#ifdef __NetBSD__
#include <sys/sysctl.h>
+#include <lwp.h>
#endif

#include "qemu/mmap-alloc.h"
@@ -84,6 +86,13 @@ int qemu_get_thread_id(void)
{
#if defined(__linux__)
return syscall(SYS_gettid);
+#elif defined(__FreeBSD__)
+ /* thread id is up to INT_MAX */
+ long tid;
+ thr_self(&tid);
+ return (int)tid;
+#elif defined(__NetBSD__)
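+ /* _lwp_self() returns the calling thread's lwpid_t, which fits in an int */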
+ return _lwp_self();
#else
return getpid();
#endif