33 files changed, 1435 insertions, 193 deletions
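Before the hunks, a note on the wire format the new Xen transport uses: frontend and backend share one or more "flex" rings, generated by DEFINE_XEN_FLEX_RING_AND_INTF(xen_9pfs) in the new include/hw/xen/io/ring.h below, and every 9p message begins with the little-endian header that hw/9pfs/9p.h now factors out as P9MsgHeader. A minimal, self-contained sketch of that framing follows; the helper names are local stand-ins for the generated xen_9pfs_* functions, RING_ORDER and the sample message are illustrative values, and it assumes a little-endian host so the _le fields can be read directly (QEMU itself converts with le16_to_cpu/le32_to_cpu):

    /* Sketch of the flex-ring framing used by xen-9p-backend.c below.
     * ring_mask()/read_packet() mirror the generated xen_9pfs_mask()
     * and xen_9pfs_read_packet(). */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef unsigned int RING_IDX;

    #define RING_ORDER 1
    #define PAGE_SHIFT 12
    /* XEN_FLEX_RING_SIZE(order): half of the (1 << order) pages holds
     * the "in" ring, the other half the "out" ring. */
    #define RING_SIZE  (1UL << (RING_ORDER + PAGE_SHIFT - 1))

    static RING_IDX ring_mask(RING_IDX idx)
    {
        return idx & (RING_SIZE - 1);
    }

    /* Copy size bytes out of the circular buffer, handling wrap-around. */
    static void read_packet(void *dst, const unsigned char *buf,
                            size_t size, RING_IDX *cons)
    {
        RING_IDX c = ring_mask(*cons);

        if (size <= RING_SIZE - c) {
            memcpy(dst, buf + c, size);
        } else {
            memcpy(dst, buf + c, RING_SIZE - c);
            memcpy((unsigned char *)dst + (RING_SIZE - c), buf,
                   size - (RING_SIZE - c));
        }
        *cons += size;
    }

    /* Same wire header the patch factors out as P9MsgHeader in 9p.h. */
    typedef struct __attribute__((packed)) {
        uint32_t size_le;
        uint8_t  id;
        uint16_t tag_le;
    } P9MsgHeader;

    int main(void)
    {
        unsigned char out_ring[RING_SIZE];
        P9MsgHeader req = { .size_le = 19, .id = 100 /* Tversion */,
                            .tag_le = 0xffff /* NOTAG */ };
        RING_IDX prod = 0, cons = 0;
        P9MsgHeader h;

        /* Frontend produces a request... */
        memcpy(out_ring + ring_mask(prod), &req, sizeof(req));
        prod += sizeof(req);

        /* ...backend peels off the 7-byte header, as xen_9pfs_receive()
         * does before allocating a PDU. */
        read_packet(&h, out_ring, sizeof(h), &cons);
        printf("size=%u id=%u tag=0x%x queued=%u\n",
               (unsigned)h.size_le, (unsigned)h.id, (unsigned)h.tag_le,
               prod - cons);
        return 0;
    }

The wrap-around copy is also why xen_9pfs_in_sg()/xen_9pfs_out_sg() in the backend hand back either one or two iovecs, depending on whether the payload crosses the end of the ring.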
diff --git a/MAINTAINERS b/MAINTAINERS index cae3b09f9c..355982b623 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -327,6 +327,7 @@ L: xen-devel@lists.xenproject.org S: Supported F: xen-* F: */xen* +F: hw/9pfs/xen-9p-backend.c F: hw/char/xen_console.c F: hw/display/xenfb.c F: hw/net/xen_nic.c diff --git a/Makefile.target b/Makefile.target index e62021d5aa..465a633367 100644 --- a/Makefile.target +++ b/Makefile.target @@ -149,12 +149,6 @@ obj-y += dump.o obj-y += migration/ram.o migration/savevm.o LIBS := $(libs_softmmu) $(LIBS) -# xen support -obj-$(CONFIG_XEN) += xen-common.o -obj-$(CONFIG_XEN_I386) += xen-hvm.o xen-mapcache.o -obj-$(call lnot,$(CONFIG_XEN)) += xen-common-stub.o -obj-$(call lnot,$(CONFIG_XEN_I386)) += xen-hvm-stub.o - # Hardware support ifeq ($(TARGET_NAME), sparc64) obj-y += hw/sparc64/ @@ -1997,30 +1997,65 @@ fi # xen probe if test "$xen" != "no" ; then - xen_libs="-lxenstore -lxenctrl -lxenguest" - xen_stable_libs="-lxenforeignmemory -lxengnttab -lxenevtchn" + # Check whether Xen library path is specified via --extra-ldflags to avoid + # overriding this setting with pkg-config output. If not, try pkg-config + # to obtain all needed flags. + + if ! echo $EXTRA_LDFLAGS | grep tools/libxc > /dev/null && \ + $pkg_config --exists xencontrol ; then + xen_ctrl_version="$(printf '%d%02d%02d' \ + $($pkg_config --modversion xencontrol | sed 's/\./ /g') )" + xen=yes + xen_pc="xencontrol xenstore xenguest xenforeignmemory xengnttab" + xen_pc="$xen_pc xenevtchn xendevicemodel" + QEMU_CFLAGS="$QEMU_CFLAGS $($pkg_config --cflags $xen_pc)" + libs_softmmu="$($pkg_config --libs $xen_pc) $libs_softmmu" + LDFLAGS="$($pkg_config --libs $xen_pc) $LDFLAGS" + else - # First we test whether Xen headers and libraries are available. - # If no, we are done and there is no Xen support. - # If yes, more tests are run to detect the Xen version. + xen_libs="-lxenstore -lxenctrl -lxenguest" + xen_stable_libs="-lxencall -lxenforeignmemory -lxengnttab -lxenevtchn" - # Xen (any) - cat > $TMPC <<EOF + # First we test whether Xen headers and libraries are available. + # If no, we are done and there is no Xen support. + # If yes, more tests are run to detect the Xen version. + + # Xen (any) + cat > $TMPC <<EOF #include <xenctrl.h> int main(void) { return 0; } EOF - if ! compile_prog "" "$xen_libs" ; then - # Xen not found - if test "$xen" = "yes" ; then - feature_not_found "xen" "Install xen devel" - fi - xen=no + if ! compile_prog "" "$xen_libs" ; then + # Xen not found + if test "$xen" = "yes" ; then + feature_not_found "xen" "Install xen devel" + fi + xen=no + + # Xen unstable + elif + cat > $TMPC <<EOF && +#undef XC_WANT_COMPAT_DEVICEMODEL_API +#define __XEN_TOOLS__ +#include <xendevicemodel.h> +int main(void) { + xendevicemodel_handle *xd; - # Xen unstable - elif - cat > $TMPC <<EOF && + xd = xendevicemodel_open(0, 0); + xendevicemodel_close(xd); + + return 0; +} +EOF + compile_prog "" "$xen_libs -lxendevicemodel $xen_stable_libs" + then + xen_stable_libs="-lxendevicemodel $xen_stable_libs" + xen_ctrl_version=40900 + xen=yes + elif + cat > $TMPC <<EOF && /* * If we have stable libs the we don't want the libxc compat * layers, regardless of what CFLAGS we may have been given. 
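Worth making the version arithmetic explicit, since every guard in the series depends on it: the probes above replace the old three-digit xen_ctrl_version encoding (471, 480, ...) with major * 10000 + minor * 100 + micro (40701, 40800, 40900), which is what configure's printf '%d%02d%02d' computes over the pkg-config --modversion output, and why guards such as CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800 in xen_disk.c change in step. A small sketch of the same computation (values illustrative):

    /* Mirrors configure's
     *   printf '%d%02d%02d' $(pkg-config --modversion xencontrol | sed 's/\./ /g')
     * The two-digit minor/micro fields leave room for versions >= 10,
     * which the old three-digit scheme could not represent. */
    #include <stdio.h>

    static int xen_ctrl_version(int major, int minor, int micro)
    {
        return major * 10000 + minor * 100 + micro;
    }

    int main(void)
    {
        printf("%d\n", xen_ctrl_version(4, 9, 0));  /* 40900 */
        printf("%d\n", xen_ctrl_version(4, 7, 1));  /* 40701; formerly 471 */
        return 0;
    }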
@@ -2070,12 +2105,12 @@ int main(void) { return 0; } EOF - compile_prog "" "$xen_libs $xen_stable_libs" - then - xen_ctrl_version=480 - xen=yes - elif - cat > $TMPC <<EOF && + compile_prog "" "$xen_libs $xen_stable_libs" + then + xen_ctrl_version=40800 + xen=yes + elif + cat > $TMPC <<EOF && /* * If we have stable libs the we don't want the libxc compat * layers, regardless of what CFLAGS we may have been given. @@ -2121,12 +2156,12 @@ int main(void) { return 0; } EOF - compile_prog "" "$xen_libs $xen_stable_libs" - then - xen_ctrl_version=471 - xen=yes - elif - cat > $TMPC <<EOF && + compile_prog "" "$xen_libs $xen_stable_libs" + then + xen_ctrl_version=40701 + xen=yes + elif + cat > $TMPC <<EOF && #include <xenctrl.h> #include <stdint.h> int main(void) { @@ -2136,14 +2171,14 @@ int main(void) { return 0; } EOF - compile_prog "" "$xen_libs" - then - xen_ctrl_version=470 - xen=yes - - # Xen 4.6 - elif - cat > $TMPC <<EOF && + compile_prog "" "$xen_libs" + then + xen_ctrl_version=40700 + xen=yes + + # Xen 4.6 + elif + cat > $TMPC <<EOF && #include <xenctrl.h> #include <xenstore.h> #include <stdint.h> @@ -2164,14 +2199,14 @@ int main(void) { return 0; } EOF - compile_prog "" "$xen_libs" - then - xen_ctrl_version=460 - xen=yes - - # Xen 4.5 - elif - cat > $TMPC <<EOF && + compile_prog "" "$xen_libs" + then + xen_ctrl_version=40600 + xen=yes + + # Xen 4.5 + elif + cat > $TMPC <<EOF && #include <xenctrl.h> #include <xenstore.h> #include <stdint.h> @@ -2191,13 +2226,13 @@ int main(void) { return 0; } EOF - compile_prog "" "$xen_libs" - then - xen_ctrl_version=450 - xen=yes + compile_prog "" "$xen_libs" + then + xen_ctrl_version=40500 + xen=yes - elif - cat > $TMPC <<EOF && + elif + cat > $TMPC <<EOF && #include <xenctrl.h> #include <xenstore.h> #include <stdint.h> @@ -2216,24 +2251,25 @@ int main(void) { return 0; } EOF - compile_prog "" "$xen_libs" - then - xen_ctrl_version=420 - xen=yes + compile_prog "" "$xen_libs" + then + xen_ctrl_version=40200 + xen=yes - else - if test "$xen" = "yes" ; then - feature_not_found "xen (unsupported version)" \ - "Install a supported xen (xen 4.2 or newer)" + else + if test "$xen" = "yes" ; then + feature_not_found "xen (unsupported version)" \ + "Install a supported xen (xen 4.2 or newer)" + fi + xen=no fi - xen=no - fi - if test "$xen" = yes; then - if test $xen_ctrl_version -ge 471 ; then - libs_softmmu="$xen_stable_libs $libs_softmmu" + if test "$xen" = yes; then + if test $xen_ctrl_version -ge 40701 ; then + libs_softmmu="$xen_stable_libs $libs_softmmu" + fi + libs_softmmu="$xen_libs $libs_softmmu" fi - libs_softmmu="$xen_libs $libs_softmmu" fi fi diff --git a/default-configs/i386-softmmu.mak b/default-configs/i386-softmmu.mak index 029e95202a..d2ab2f6655 100644 --- a/default-configs/i386-softmmu.mak +++ b/default-configs/i386-softmmu.mak @@ -39,7 +39,6 @@ CONFIG_TPM_TIS=$(CONFIG_TPM) CONFIG_MC146818RTC=y CONFIG_PCI_PIIX=y CONFIG_WDT_IB700=y -CONFIG_XEN_I386=$(CONFIG_XEN) CONFIG_ISA_DEBUG=y CONFIG_ISA_TESTDEV=y CONFIG_VMPORT=y diff --git a/default-configs/x86_64-softmmu.mak b/default-configs/x86_64-softmmu.mak index d1d7432f74..9bde2f1c4b 100644 --- a/default-configs/x86_64-softmmu.mak +++ b/default-configs/x86_64-softmmu.mak @@ -39,7 +39,6 @@ CONFIG_TPM_TIS=$(CONFIG_TPM) CONFIG_MC146818RTC=y CONFIG_PCI_PIIX=y CONFIG_WDT_IB700=y -CONFIG_XEN_I386=$(CONFIG_XEN) CONFIG_ISA_DEBUG=y CONFIG_ISA_TESTDEV=y CONFIG_VMPORT=y diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h index b7e836251e..5312d8a424 100644 --- a/hw/9pfs/9p.h +++ b/hw/9pfs/9p.h @@ -119,6 +119,12 @@ static 
inline char *rpath(FsContext *ctx, const char *path) typedef struct V9fsPDU V9fsPDU; struct V9fsState; +typedef struct { + uint32_t size_le; + uint8_t id; + uint16_t tag_le; +} QEMU_PACKED P9MsgHeader; + struct V9fsPDU { uint32_t size; diff --git a/hw/9pfs/Makefile.objs b/hw/9pfs/Makefile.objs index 32197e6671..cab5e942ed 100644 --- a/hw/9pfs/Makefile.objs +++ b/hw/9pfs/Makefile.objs @@ -5,5 +5,6 @@ common-obj-y += coth.o cofs.o codir.o cofile.o common-obj-y += coxattr.o 9p-synth.o common-obj-$(CONFIG_OPEN_BY_HANDLE) += 9p-handle.o common-obj-y += 9p-proxy.o +common-obj-$(CONFIG_XEN) += xen-9p-backend.o obj-y += virtio-9p-device.o diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c index 27a4a32f5c..3782f43702 100644 --- a/hw/9pfs/virtio-9p-device.c +++ b/hw/9pfs/virtio-9p-device.c @@ -46,11 +46,7 @@ static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq) VirtQueueElement *elem; while ((pdu = pdu_alloc(s))) { - struct { - uint32_t size_le; - uint8_t id; - uint16_t tag_le; - } QEMU_PACKED out; + P9MsgHeader out; elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); if (!elem) { diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c new file mode 100644 index 0000000000..9c7f41af99 --- /dev/null +++ b/hw/9pfs/xen-9p-backend.c @@ -0,0 +1,440 @@ +/* + * Xen 9p backend + * + * Copyright Aporeto 2017 + * + * Authors: + * Stefano Stabellini <stefano@aporeto.com> + * + */ + +#include "qemu/osdep.h" + +#include "hw/hw.h" +#include "hw/9pfs/9p.h" +#include "hw/xen/xen_backend.h" +#include "hw/9pfs/xen-9pfs.h" +#include "qemu/config-file.h" +#include "fsdev/qemu-fsdev.h" + +#define VERSIONS "1" +#define MAX_RINGS 8 +#define MAX_RING_ORDER 8 + +typedef struct Xen9pfsRing { + struct Xen9pfsDev *priv; + + int ref; + xenevtchn_handle *evtchndev; + int evtchn; + int local_port; + int ring_order; + struct xen_9pfs_data_intf *intf; + unsigned char *data; + struct xen_9pfs_data ring; + + struct iovec *sg; + QEMUBH *bh; + + /* local copies, so that we can read/write PDU data directly from + * the ring */ + RING_IDX out_cons, out_size, in_cons; + bool inprogress; +} Xen9pfsRing; + +typedef struct Xen9pfsDev { + struct XenDevice xendev; /* must be first */ + V9fsState state; + char *path; + char *security_model; + char *tag; + char *id; + + int num_rings; + Xen9pfsRing *rings; +} Xen9pfsDev; + +static void xen_9pfs_in_sg(Xen9pfsRing *ring, + struct iovec *in_sg, + int *num, + uint32_t idx, + uint32_t size) +{ + RING_IDX cons, prod, masked_prod, masked_cons; + + cons = ring->intf->in_cons; + prod = ring->intf->in_prod; + xen_rmb(); + masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order)); + masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order)); + + if (masked_prod < masked_cons) { + in_sg[0].iov_base = ring->ring.in + masked_prod; + in_sg[0].iov_len = masked_cons - masked_prod; + *num = 1; + } else { + in_sg[0].iov_base = ring->ring.in + masked_prod; + in_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) - masked_prod; + in_sg[1].iov_base = ring->ring.in; + in_sg[1].iov_len = masked_cons; + *num = 2; + } +} + +static void xen_9pfs_out_sg(Xen9pfsRing *ring, + struct iovec *out_sg, + int *num, + uint32_t idx) +{ + RING_IDX cons, prod, masked_prod, masked_cons; + + cons = ring->intf->out_cons; + prod = ring->intf->out_prod; + xen_rmb(); + masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order)); + masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order)); + + if (masked_cons < masked_prod) { + out_sg[0].iov_base = 
ring->ring.out + masked_cons; + out_sg[0].iov_len = ring->out_size; + *num = 1; + } else { + if (ring->out_size > + (XEN_FLEX_RING_SIZE(ring->ring_order) - masked_cons)) { + out_sg[0].iov_base = ring->ring.out + masked_cons; + out_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) - + masked_cons; + out_sg[1].iov_base = ring->ring.out; + out_sg[1].iov_len = ring->out_size - + (XEN_FLEX_RING_SIZE(ring->ring_order) - + masked_cons); + *num = 2; + } else { + out_sg[0].iov_base = ring->ring.out + masked_cons; + out_sg[0].iov_len = ring->out_size; + *num = 1; + } + } +} + +static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu, + size_t offset, + const char *fmt, + va_list ap) +{ + Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); + struct iovec in_sg[2]; + int num; + + xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings], + in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512)); + return v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap); +} + +static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu, + size_t offset, + const char *fmt, + va_list ap) +{ + Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); + struct iovec out_sg[2]; + int num; + + xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings], + out_sg, &num, pdu->idx); + return v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap); +} + +static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu, + struct iovec **piov, + unsigned int *pniov) +{ + Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); + Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings]; + int num; + + g_free(ring->sg); + + ring->sg = g_malloc0(sizeof(*ring->sg) * 2); + xen_9pfs_out_sg(ring, ring->sg, &num, pdu->idx); + *piov = ring->sg; + *pniov = num; +} + +static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu, + struct iovec **piov, + unsigned int *pniov, + size_t size) +{ + Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); + Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings]; + int num; + + g_free(ring->sg); + + ring->sg = g_malloc0(sizeof(*ring->sg) * 2); + xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size); + *piov = ring->sg; + *pniov = num; +} + +static void xen_9pfs_push_and_notify(V9fsPDU *pdu) +{ + RING_IDX prod; + Xen9pfsDev *priv = container_of(pdu->s, Xen9pfsDev, state); + Xen9pfsRing *ring = &priv->rings[pdu->tag % priv->num_rings]; + + g_free(ring->sg); + ring->sg = NULL; + + ring->intf->out_cons = ring->out_cons; + xen_wmb(); + + prod = ring->intf->in_prod; + xen_rmb(); + ring->intf->in_prod = prod + pdu->size; + xen_wmb(); + + ring->inprogress = false; + xenevtchn_notify(ring->evtchndev, ring->local_port); + + qemu_bh_schedule(ring->bh); +} + +static const struct V9fsTransport xen_9p_transport = { + .pdu_vmarshal = xen_9pfs_pdu_vmarshal, + .pdu_vunmarshal = xen_9pfs_pdu_vunmarshal, + .init_in_iov_from_pdu = xen_9pfs_init_in_iov_from_pdu, + .init_out_iov_from_pdu = xen_9pfs_init_out_iov_from_pdu, + .push_and_notify = xen_9pfs_push_and_notify, +}; + +static int xen_9pfs_init(struct XenDevice *xendev) +{ + return 0; +} + +static int xen_9pfs_receive(Xen9pfsRing *ring) +{ + P9MsgHeader h; + RING_IDX cons, prod, masked_prod, masked_cons; + V9fsPDU *pdu; + + if (ring->inprogress) { + return 0; + } + + cons = ring->intf->out_cons; + prod = ring->intf->out_prod; + xen_rmb(); + + if (xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order)) < + sizeof(h)) { + return 0; + } + ring->inprogress = true; + + masked_prod = xen_9pfs_mask(prod, 
XEN_FLEX_RING_SIZE(ring->ring_order)); + masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order)); + + xen_9pfs_read_packet((uint8_t *) &h, ring->ring.out, sizeof(h), + masked_prod, &masked_cons, + XEN_FLEX_RING_SIZE(ring->ring_order)); + + /* cannot fail, because we only handle one request per ring at a time */ + pdu = pdu_alloc(&ring->priv->state); + pdu->size = le32_to_cpu(h.size_le); + pdu->id = h.id; + pdu->tag = le32_to_cpu(h.tag_le); + ring->out_size = le32_to_cpu(h.size_le); + ring->out_cons = cons + le32_to_cpu(h.size_le); + + qemu_co_queue_init(&pdu->complete); + pdu_submit(pdu); + + return 0; +} + +static void xen_9pfs_bh(void *opaque) +{ + Xen9pfsRing *ring = opaque; + xen_9pfs_receive(ring); +} + +static void xen_9pfs_evtchn_event(void *opaque) +{ + Xen9pfsRing *ring = opaque; + evtchn_port_t port; + + port = xenevtchn_pending(ring->evtchndev); + xenevtchn_unmask(ring->evtchndev, port); + + qemu_bh_schedule(ring->bh); +} + +static int xen_9pfs_free(struct XenDevice *xendev) +{ + int i; + Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); + + g_free(xen_9pdev->id); + g_free(xen_9pdev->tag); + g_free(xen_9pdev->path); + g_free(xen_9pdev->security_model); + + for (i = 0; i < xen_9pdev->num_rings; i++) { + if (xen_9pdev->rings[i].data != NULL) { + xengnttab_unmap(xen_9pdev->xendev.gnttabdev, + xen_9pdev->rings[i].data, + (1 << xen_9pdev->rings[i].ring_order)); + } + if (xen_9pdev->rings[i].intf != NULL) { + xengnttab_unmap(xen_9pdev->xendev.gnttabdev, + xen_9pdev->rings[i].intf, + 1); + } + if (xen_9pdev->rings[i].evtchndev > 0) { + qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), + NULL, NULL, NULL); + xenevtchn_unbind(xen_9pdev->rings[i].evtchndev, + xen_9pdev->rings[i].local_port); + } + if (xen_9pdev->rings[i].bh != NULL) { + qemu_bh_delete(xen_9pdev->rings[i].bh); + } + } + g_free(xen_9pdev->rings); + return 0; +} + +static int xen_9pfs_connect(struct XenDevice *xendev) +{ + int i; + Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); + V9fsState *s = &xen_9pdev->state; + QemuOpts *fsdev; + + if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings", + &xen_9pdev->num_rings) == -1 || + xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) { + return -1; + } + + xen_9pdev->rings = g_malloc0(xen_9pdev->num_rings * sizeof(Xen9pfsRing)); + for (i = 0; i < xen_9pdev->num_rings; i++) { + char *str; + int ring_order; + + xen_9pdev->rings[i].priv = xen_9pdev; + xen_9pdev->rings[i].evtchn = -1; + xen_9pdev->rings[i].local_port = -1; + + str = g_strdup_printf("ring-ref%u", i); + if (xenstore_read_fe_int(&xen_9pdev->xendev, str, + &xen_9pdev->rings[i].ref) == -1) { + goto out; + } + g_free(str); + str = g_strdup_printf("event-channel-%u", i); + if (xenstore_read_fe_int(&xen_9pdev->xendev, str, + &xen_9pdev->rings[i].evtchn) == -1) { + goto out; + } + g_free(str); + + xen_9pdev->rings[i].intf = xengnttab_map_grant_ref( + xen_9pdev->xendev.gnttabdev, + xen_9pdev->xendev.dom, + xen_9pdev->rings[i].ref, + PROT_READ | PROT_WRITE); + if (!xen_9pdev->rings[i].intf) { + goto out; + } + ring_order = xen_9pdev->rings[i].intf->ring_order; + if (ring_order > MAX_RING_ORDER) { + goto out; + } + xen_9pdev->rings[i].ring_order = ring_order; + xen_9pdev->rings[i].data = xengnttab_map_domain_grant_refs( + xen_9pdev->xendev.gnttabdev, + (1 << ring_order), + xen_9pdev->xendev.dom, + xen_9pdev->rings[i].intf->ref, + PROT_READ | PROT_WRITE); + if (!xen_9pdev->rings[i].data) { + goto out; + } + xen_9pdev->rings[i].ring.in = 
xen_9pdev->rings[i].data; + xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data + + XEN_FLEX_RING_SIZE(ring_order); + + xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]); + xen_9pdev->rings[i].out_cons = 0; + xen_9pdev->rings[i].out_size = 0; + xen_9pdev->rings[i].inprogress = false; + + + xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0); + if (xen_9pdev->rings[i].evtchndev == NULL) { + goto out; + } + fcntl(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), F_SETFD, FD_CLOEXEC); + xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain + (xen_9pdev->rings[i].evtchndev, + xendev->dom, + xen_9pdev->rings[i].evtchn); + if (xen_9pdev->rings[i].local_port == -1) { + xen_pv_printf(xendev, 0, + "xenevtchn_bind_interdomain failed port=%d\n", + xen_9pdev->rings[i].evtchn); + goto out; + } + xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port); + qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), + xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]); + } + + xen_9pdev->security_model = xenstore_read_be_str(xendev, "security_model"); + xen_9pdev->path = xenstore_read_be_str(xendev, "path"); + xen_9pdev->id = s->fsconf.fsdev_id = + g_strdup_printf("xen9p%d", xendev->dev); + xen_9pdev->tag = s->fsconf.tag = xenstore_read_fe_str(xendev, "tag"); + v9fs_register_transport(s, &xen_9p_transport); + fsdev = qemu_opts_create(qemu_find_opts("fsdev"), + s->fsconf.tag, + 1, NULL); + qemu_opt_set(fsdev, "fsdriver", "local", NULL); + qemu_opt_set(fsdev, "path", xen_9pdev->path, NULL); + qemu_opt_set(fsdev, "security_model", xen_9pdev->security_model, NULL); + qemu_opts_set_id(fsdev, s->fsconf.fsdev_id); + qemu_fsdev_add(fsdev); + v9fs_device_realize_common(s, NULL); + + return 0; + +out: + xen_9pfs_free(xendev); + return -1; +} + +static void xen_9pfs_alloc(struct XenDevice *xendev) +{ + xenstore_write_be_str(xendev, "versions", VERSIONS); + xenstore_write_be_int(xendev, "max-rings", MAX_RINGS); + xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER); +} + +static void xen_9pfs_disconnect(struct XenDevice *xendev) +{ + /* Dynamic hotplug of PV filesystems at runtime is not supported. */ +} + +struct XenDevOps xen_9pfs_ops = { + .size = sizeof(Xen9pfsDev), + .flags = DEVOPS_FLAG_NEED_GNTDEV, + .alloc = xen_9pfs_alloc, + .init = xen_9pfs_init, + .initialise = xen_9pfs_connect, + .disconnect = xen_9pfs_disconnect, + .free = xen_9pfs_free, +}; diff --git a/hw/9pfs/xen-9pfs.h b/hw/9pfs/xen-9pfs.h new file mode 100644 index 0000000000..2d6ef7828c --- /dev/null +++ b/hw/9pfs/xen-9pfs.h @@ -0,0 +1,21 @@ +/* + * Xen 9p backend + * + * Copyright Aporeto 2017 + * + * Authors: + * Stefano Stabellini <stefano@aporeto.com> + * + * This work is licensed under the terms of the GNU GPL version 2 or + * later. See the COPYING file in the top-level directory. + * + */ + +#include <xen/io/protocols.h> +#include "hw/xen/io/ring.h" + +/* + * Do not merge into xen-9p-backend.c: clang doesn't allow unused static + * inline functions in c files. 
+ */ +DEFINE_XEN_FLEX_RING_AND_INTF(xen_9pfs); diff --git a/hw/block/xen_blkif.h b/hw/block/xen_blkif.h index 3300b6fc0a..3e6e1ea365 100644 --- a/hw/block/xen_blkif.h +++ b/hw/block/xen_blkif.h @@ -1,7 +1,7 @@ #ifndef XEN_BLKIF_H #define XEN_BLKIF_H -#include <xen/io/ring.h> +#include "hw/xen/io/ring.h" #include <xen/io/blkif.h> #include <xen/io/protocols.h> diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c index 456a2d5694..27df0486d9 100644 --- a/hw/block/xen_disk.c +++ b/hw/block/xen_disk.c @@ -492,7 +492,7 @@ static int ioreq_map(struct ioreq *ioreq) return 0; } -#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 480 +#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800 static void ioreq_free_copy_buffers(struct ioreq *ioreq) { diff --git a/hw/i386/xen/Makefile.objs b/hw/i386/xen/Makefile.objs index 801a68d326..be9d10cf2a 100644 --- a/hw/i386/xen/Makefile.objs +++ b/hw/i386/xen/Makefile.objs @@ -1 +1 @@ -obj-y += xen_platform.o xen_apic.o xen_pvdevice.o +obj-y += xen_platform.o xen_apic.o xen_pvdevice.o xen-hvm.o xen-mapcache.o diff --git a/hw/i386/xen/trace-events b/hw/i386/xen/trace-events index 321fe60fed..547438db13 100644 --- a/hw/i386/xen/trace-events +++ b/hw/i386/xen/trace-events @@ -4,3 +4,20 @@ xen_platform_log(char *s) "xen platform: %s" # hw/i386/xen/xen_pvdevice.c xen_pv_mmio_read(uint64_t addr) "WARNING: read from Xen PV Device MMIO space (address %"PRIx64")" xen_pv_mmio_write(uint64_t addr) "WARNING: write to Xen PV Device MMIO space (address %"PRIx64")" + +# xen-hvm.c +xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: %#lx, size %#lx" +xen_client_set_memory(uint64_t start_addr, unsigned long size, bool log_dirty) "%#"PRIx64" size %#lx, log_dirty %i" +handle_ioreq(void *req, uint32_t type, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p type=%d dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" +handle_ioreq_read(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p read type=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" +handle_ioreq_write(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p write type=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" +cpu_ioreq_pio(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p pio dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" +cpu_ioreq_pio_read_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio read reg data=%#"PRIx64" port=%#"PRIx64" size=%d" +cpu_ioreq_pio_write_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio write reg data=%#"PRIx64" port=%#"PRIx64" size=%d" +cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p copy dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" + +# xen-mapcache.c +xen_map_cache(uint64_t phys_addr) "want %#"PRIx64 +xen_remap_bucket(uint64_t index) "index %#"PRIx64 +xen_map_cache_return(void* ptr) "%p" + diff --git a/xen-hvm.c b/hw/i386/xen/xen-hvm.c index 5043beb98f..b1c05ffb86 100644 --- a/xen-hvm.c +++ b/hw/i386/xen/xen-hvm.c @@ -22,7 +22,7 @@ #include "qemu/error-report.h" #include "qemu/range.h" #include "sysemu/xen-mapcache.h" -#include "trace-root.h" +#include 
"trace.h" #include "exec/address-spaces.h" #include <xen/hvm/ioreq.h> @@ -125,8 +125,8 @@ int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num) void xen_piix3_set_irq(void *opaque, int irq_num, int level) { - xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2, - irq_num & 3, level); + xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2, + irq_num & 3, level); } void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len) @@ -141,7 +141,7 @@ void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len) } v &= 0xf; if (((address + i) >= 0x60) && ((address + i) <= 0x63)) { - xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v); + xen_set_pci_link_route(xen_domid, address + i - 0x60, v); } } } @@ -156,7 +156,7 @@ int xen_is_pirq_msi(uint32_t msi_data) void xen_hvm_inject_msi(uint64_t addr, uint32_t data) { - xc_hvm_inject_msi(xen_xc, xen_domid, addr, data); + xen_inject_msi(xen_domid, addr, data); } static void xen_suspend_notifier(Notifier *notifier, void *data) @@ -168,7 +168,7 @@ static void xen_suspend_notifier(Notifier *notifier, void *data) static void xen_set_irq(void *opaque, int irq, int level) { - xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level); + xen_set_isa_irq_level(xen_domid, irq, level); } qemu_irq *xen_interrupt_controller_init(void) @@ -454,10 +454,10 @@ static void xen_set_memory(struct MemoryListener *listener, return; } else { if (add) { - xen_map_memory_section(xen_xc, xen_domid, state->ioservid, + xen_map_memory_section(xen_domid, state->ioservid, section); } else { - xen_unmap_memory_section(xen_xc, xen_domid, state->ioservid, + xen_unmap_memory_section(xen_domid, state->ioservid, section); } } @@ -481,10 +481,10 @@ static void xen_set_memory(struct MemoryListener *listener, section->mr, section->offset_within_region); } else { mem_type = HVMMEM_ram_ro; - if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type, - start_addr >> TARGET_PAGE_BITS, - size >> TARGET_PAGE_BITS)) { - DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n", + if (xen_set_mem_type(xen_domid, mem_type, + start_addr >> TARGET_PAGE_BITS, + size >> TARGET_PAGE_BITS)) { + DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n", start_addr); } } @@ -521,7 +521,7 @@ static void xen_io_add(MemoryListener *listener, memory_region_ref(mr); - xen_map_io_section(xen_xc, xen_domid, state->ioservid, section); + xen_map_io_section(xen_domid, state->ioservid, section); } static void xen_io_del(MemoryListener *listener, @@ -534,7 +534,7 @@ static void xen_io_del(MemoryListener *listener, return; } - xen_unmap_io_section(xen_xc, xen_domid, state->ioservid, section); + xen_unmap_io_section(xen_domid, state->ioservid, section); memory_region_unref(mr); } @@ -547,7 +547,7 @@ static void xen_device_realize(DeviceListener *listener, if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { PCIDevice *pci_dev = PCI_DEVICE(dev); - xen_map_pcidev(xen_xc, xen_domid, state->ioservid, pci_dev); + xen_map_pcidev(xen_domid, state->ioservid, pci_dev); } } @@ -559,7 +559,7 @@ static void xen_device_unrealize(DeviceListener *listener, if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { PCIDevice *pci_dev = PCI_DEVICE(dev); - xen_unmap_pcidev(xen_xc, xen_domid, state->ioservid, pci_dev); + xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev); } } @@ -586,9 +586,8 @@ static void xen_sync_dirty_bitmap(XenIOState *state, return; } - rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid, - start_addr >> TARGET_PAGE_BITS, npages, - bitmap); + rc = 
xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS, + npages, bitmap); if (rc < 0) { #ifndef ENODATA #define ENODATA ENOENT @@ -634,7 +633,7 @@ static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section, if (old & ~new & (1 << DIRTY_MEMORY_VGA)) { state->log_for_dirtybit = NULL; /* Disable dirty bit tracking */ - xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL); + xen_track_dirty_vram(xen_domid, 0, 0, NULL); } } @@ -1139,7 +1138,7 @@ static void xen_hvm_change_state_handler(void *opaque, int running, xen_main_loop_prepare(state); } - xen_set_ioreq_server_state(xen_xc, xen_domid, + xen_set_ioreq_server_state(xen_domid, state->ioservid, (rstate == RUN_STATE_RUNNING)); } @@ -1227,7 +1226,15 @@ void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory) goto err; } - xen_create_ioreq_server(xen_xc, xen_domid, &state->ioservid); + if (xen_domid_restrict) { + rc = xen_restrict(xen_domid); + if (rc < 0) { + error_report("failed to restrict: error %d", errno); + goto err; + } + } + + xen_create_ioreq_server(xen_domid, &state->ioservid); state->exit.notify = xen_exit_notifier; qemu_add_exit_notifier(&state->exit); @@ -1238,7 +1245,7 @@ void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory) state->wakeup.notify = xen_wakeup_notifier; qemu_register_wakeup_notifier(&state->wakeup); - rc = xen_get_ioreq_server_info(xen_xc, xen_domid, state->ioservid, + rc = xen_get_ioreq_server_info(xen_domid, state->ioservid, &ioreq_pfn, &bufioreq_pfn, &bufioreq_evtchn); if (rc < 0) { @@ -1288,7 +1295,7 @@ void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory) /* Note: cpus is empty at this point in init */ state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *)); - rc = xen_set_ioreq_server_state(xen_xc, xen_domid, state->ioservid, true); + rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true); if (rc < 0) { error_report("failed to enable ioreq server info: error %d handle=%p", errno, xen_xc); @@ -1391,7 +1398,7 @@ void xen_shutdown_fatal_error(const char *fmt, ...) qemu_system_shutdown_request(); } -void xen_modified_memory(ram_addr_t start, ram_addr_t length) +void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) { if (unlikely(xen_in_migration)) { int rc; @@ -1403,7 +1410,7 @@ void xen_modified_memory(ram_addr_t start, ram_addr_t length) start_pfn = start >> TARGET_PAGE_BITS; nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS) - start_pfn; - rc = xc_hvm_modified_memory(xen_xc, xen_domid, start_pfn, nb_pages); + rc = xen_modified_memory(xen_domid, start_pfn, nb_pages); if (rc) { fprintf(stderr, "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n", diff --git a/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c index 1a96d2e5db..31debdfb2c 100644 --- a/xen-mapcache.c +++ b/hw/i386/xen/xen-mapcache.c @@ -19,7 +19,7 @@ #include <xen/hvm/params.h> #include "sysemu/xen-mapcache.h" -#include "trace-root.h" +#include "trace.h" //#define MAPCACHE_DEBUG diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c index 6010f35266..1419fc96d2 100644 --- a/hw/i386/xen/xen_platform.c +++ b/hw/i386/xen/xen_platform.c @@ -195,7 +195,7 @@ static void platform_fixed_ioport_writeb(void *opaque, uint32_t addr, uint32_t v case 0: /* Platform flags */ { hvmmem_type_t mem_type = (val & PFFLAG_ROM_LOCK) ? 
HVMMEM_ram_ro : HVMMEM_ram_rw; - if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type, 0xc0, 0x40)) { + if (xen_set_mem_type(xen_domid, mem_type, 0xc0, 0x40)) { DPRINTF("unable to change ro/rw state of ROM memory area!\n"); } else { s->flags = val & PFFLAG_ROM_LOCK; diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c index 8e676e6c96..370b3d9387 100644 --- a/hw/usb/xen-usb.c +++ b/hw/usb/xen-usb.c @@ -33,7 +33,7 @@ #include "qapi/qmp/qint.h" #include "qapi/qmp/qstring.h" -#include <xen/io/ring.h> +#include "hw/xen/io/ring.h" #include <xen/io/usbif.h> /* diff --git a/hw/xen/Makefile.objs b/hw/xen/Makefile.objs index 4be3ec9c77..64a70bc6cb 100644 --- a/hw/xen/Makefile.objs +++ b/hw/xen/Makefile.objs @@ -1,5 +1,5 @@ # xen backend driver support -common-obj-$(CONFIG_XEN) += xen_backend.o xen_devconfig.o xen_pvdev.o +common-obj-$(CONFIG_XEN) += xen_backend.o xen_devconfig.o xen_pvdev.o xen-common.o obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen-host-pci-device.o obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen_pt.o xen_pt_config_init.o xen_pt_graphics.o xen_pt_msi.o diff --git a/hw/xen/trace-events b/hw/xen/trace-events index c4fb6f1aea..5615dce2c1 100644 --- a/hw/xen/trace-events +++ b/hw/xen/trace-events @@ -11,3 +11,4 @@ xen_map_portio_range(uint32_t id, uint64_t start_addr, uint64_t end_addr) "id: % xen_unmap_portio_range(uint32_t id, uint64_t start_addr, uint64_t end_addr) "id: %u start: %#"PRIx64" end: %#"PRIx64 xen_map_pcidev(uint32_t id, uint8_t bus, uint8_t dev, uint8_t func) "id: %u bdf: %02x.%02x.%02x" xen_unmap_pcidev(uint32_t id, uint8_t bus, uint8_t dev, uint8_t func) "id: %u bdf: %02x.%02x.%02x" +xen_domid_restrict(int err) "err: %u" diff --git a/xen-common.c b/hw/xen/xen-common.c index fd2c92847e..ae76150e8a 100644 --- a/xen-common.c +++ b/hw/xen/xen-common.c @@ -25,6 +25,10 @@ do { } while (0) #endif +xc_interface *xen_xc; +xenforeignmemory_handle *xen_fmem; +xendevicemodel_handle *xen_dmod; + static int store_dev_info(int domid, Chardev *cs, const char *string) { struct xs_handle *xs = NULL; @@ -125,6 +129,13 @@ static int xen_init(MachineState *ms) xc_interface_close(xen_xc); return -1; } + xen_dmod = xendevicemodel_open(0, 0); + if (xen_dmod == NULL) { + xen_pv_printf(NULL, 0, "can't open xen devicemodel interface\n"); + xenforeignmemory_close(xen_fmem); + xc_interface_close(xen_xc); + return -1; + } qemu_add_vm_change_state_handler(xen_change_state_handler, NULL); global_state_set_optional(); diff --git a/hw/xen/xen_backend.c b/hw/xen/xen_backend.c index 6c21c37d68..c85f1637e4 100644 --- a/hw/xen/xen_backend.c +++ b/hw/xen/xen_backend.c @@ -43,8 +43,6 @@ BusState *xen_sysbus; /* ------------------------------------------------------------- */ /* public */ -xc_interface *xen_xc = NULL; -xenforeignmemory_handle *xen_fmem = NULL; struct xs_handle *xenstore = NULL; const char *xen_protocol; @@ -585,6 +583,9 @@ void xen_be_register_common(void) xen_be_register("console", &xen_console_ops); xen_be_register("vkbd", &xen_kbdmouse_ops); xen_be_register("qdisk", &xen_blkdev_ops); +#ifdef CONFIG_VIRTFS + xen_be_register("9pfs", &xen_9pfs_ops); +#endif #ifdef CONFIG_USB_LIBUSB xen_be_register("qusb", &xen_usb_ops); #endif diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index 6436a413e7..dbe2f08d47 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -260,7 +260,7 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start, rcu_read_unlock(); - xen_modified_memory(start, length); + xen_hvm_modified_memory(start, length); } #if !defined(_WIN32) @@ -314,7 
+314,7 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, rcu_read_unlock(); - xen_modified_memory(start, pages << TARGET_PAGE_BITS); + xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS); } else { uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE; /* diff --git a/include/hw/xen/io/ring.h b/include/hw/xen/io/ring.h new file mode 100644 index 0000000000..abbca47687 --- /dev/null +++ b/include/hw/xen/io/ring.h @@ -0,0 +1,482 @@ +/****************************************************************************** + * ring.h + * + * Shared producer-consumer ring macros. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Tim Deegan and Andrew Warfield November 2004. + */ + +#ifndef __XEN_PUBLIC_IO_RING_H__ +#define __XEN_PUBLIC_IO_RING_H__ + +/* + * When #include'ing this header, you need to provide the following + * declaration upfront: + * - standard integers types (uint8_t, uint16_t, etc) + * They are provided by stdint.h of the standard headers. + * + * In addition, if you intend to use the FLEX macros, you also need to + * provide the following, before invoking the FLEX macros: + * - size_t + * - memcpy + * - grant_ref_t + * These declarations are provided by string.h of the standard headers, + * and grant_table.h from the Xen public headers. + */ + +#if __XEN_INTERFACE_VERSION__ < 0x00030208 +#define xen_mb() mb() +#define xen_rmb() rmb() +#define xen_wmb() wmb() +#endif + +typedef unsigned int RING_IDX; + +/* Round a 32-bit unsigned constant down to the nearest power of two. */ +#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) +#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) +#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) +#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) +#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x)) + +/* + * Calculate size of a shared ring, given the total available space for the + * ring and indexes (_sz), and the name tag of the request/response structure. + * A ring contains as many entries as will fit, rounded down to the nearest + * power of two (so we can mask with (size-1) to loop around). + */ +#define __CONST_RING_SIZE(_s, _sz) \ + (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ + sizeof(((struct _s##_sring *)0)->ring[0]))) +/* + * The same for passing in an actual pointer instead of a name tag. 
+ */ +#define __RING_SIZE(_s, _sz) \ + (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) + +/* + * Macros to make the correct C datatypes for a new kind of ring. + * + * To make a new ring datatype, you need to have two message structures, + * let's say request_t, and response_t already defined. + * + * In a header where you want the ring datatype declared, you then do: + * + * DEFINE_RING_TYPES(mytag, request_t, response_t); + * + * These expand out to give you a set of types, as you can see below. + * The most important of these are: + * + * mytag_sring_t - The shared ring. + * mytag_front_ring_t - The 'front' half of the ring. + * mytag_back_ring_t - The 'back' half of the ring. + * + * To initialize a ring in your code you need to know the location and size + * of the shared memory area (PAGE_SIZE, for instance). To initialise + * the front half: + * + * mytag_front_ring_t front_ring; + * SHARED_RING_INIT((mytag_sring_t *)shared_page); + * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); + * + * Initializing the back follows similarly (note that only the front + * initializes the shared ring): + * + * mytag_back_ring_t back_ring; + * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); + */ + +#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ + \ +/* Shared ring entry */ \ +union __name##_sring_entry { \ + __req_t req; \ + __rsp_t rsp; \ +}; \ + \ +/* Shared ring page */ \ +struct __name##_sring { \ + RING_IDX req_prod, req_event; \ + RING_IDX rsp_prod, rsp_event; \ + union { \ + struct { \ + uint8_t smartpoll_active; \ + } netif; \ + struct { \ + uint8_t msg; \ + } tapif_user; \ + uint8_t pvt_pad[4]; \ + } pvt; \ + uint8_t __pad[44]; \ + union __name##_sring_entry ring[1]; /* variable-length */ \ +}; \ + \ +/* "Front" end's private variables */ \ +struct __name##_front_ring { \ + RING_IDX req_prod_pvt; \ + RING_IDX rsp_cons; \ + unsigned int nr_ents; \ + struct __name##_sring *sring; \ +}; \ + \ +/* "Back" end's private variables */ \ +struct __name##_back_ring { \ + RING_IDX rsp_prod_pvt; \ + RING_IDX req_cons; \ + unsigned int nr_ents; \ + struct __name##_sring *sring; \ +}; \ + \ +/* Syntactic sugar */ \ +typedef struct __name##_sring __name##_sring_t; \ +typedef struct __name##_front_ring __name##_front_ring_t; \ +typedef struct __name##_back_ring __name##_back_ring_t + +/* + * Macros for manipulating rings. + * + * FRONT_RING_whatever works on the "front end" of a ring: here + * requests are pushed on to the ring and responses taken off it. + * + * BACK_RING_whatever works on the "back end" of a ring: here + * requests are taken off the ring and responses put on. + * + * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. + * This is OK in 1-for-1 request-response situations where the + * requestor (front end) never has more than RING_SIZE()-1 + * outstanding requests. 
+ */ + +/* Initialising empty rings */ +#define SHARED_RING_INIT(_s) do { \ + (_s)->req_prod = (_s)->rsp_prod = 0; \ + (_s)->req_event = (_s)->rsp_event = 1; \ + (void)memset((_s)->pvt.pvt_pad, 0, sizeof((_s)->pvt.pvt_pad)); \ + (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \ +} while(0) + +#define FRONT_RING_INIT(_r, _s, __size) do { \ + (_r)->req_prod_pvt = 0; \ + (_r)->rsp_cons = 0; \ + (_r)->nr_ents = __RING_SIZE(_s, __size); \ + (_r)->sring = (_s); \ +} while (0) + +#define BACK_RING_INIT(_r, _s, __size) do { \ + (_r)->rsp_prod_pvt = 0; \ + (_r)->req_cons = 0; \ + (_r)->nr_ents = __RING_SIZE(_s, __size); \ + (_r)->sring = (_s); \ +} while (0) + +/* How big is this ring? */ +#define RING_SIZE(_r) \ + ((_r)->nr_ents) + +/* Number of free requests (for use on front side only). */ +#define RING_FREE_REQUESTS(_r) \ + (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) + +/* Test if there is an empty slot available on the front ring. + * (This is only meaningful from the front. ) + */ +#define RING_FULL(_r) \ + (RING_FREE_REQUESTS(_r) == 0) + +/* Test if there are outstanding messages to be processed on a ring. */ +#define RING_HAS_UNCONSUMED_RESPONSES(_r) \ + ((_r)->sring->rsp_prod - (_r)->rsp_cons) + +#ifdef __GNUC__ +#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ + unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ + unsigned int rsp = RING_SIZE(_r) - \ + ((_r)->req_cons - (_r)->rsp_prod_pvt); \ + req < rsp ? req : rsp; \ +}) +#else +/* Same as above, but without the nice GCC ({ ... }) syntax. */ +#define RING_HAS_UNCONSUMED_REQUESTS(_r) \ + ((((_r)->sring->req_prod - (_r)->req_cons) < \ + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \ + ((_r)->sring->req_prod - (_r)->req_cons) : \ + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) +#endif + +/* Direct access to individual ring elements, by index. */ +#define RING_GET_REQUEST(_r, _idx) \ + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) + +/* + * Get a local copy of a request. + * + * Use this in preference to RING_GET_REQUEST() so all processing is + * done on a local copy that cannot be modified by the other end. + * + * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this + * to be ineffective where _req is a struct which consists of only bitfields. + */ +#define RING_COPY_REQUEST(_r, _idx, _req) do { \ + /* Use volatile to force the copy into _req. */ \ + *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \ +} while (0) + +#define RING_GET_RESPONSE(_r, _idx) \ + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) + +/* Loop termination condition: Would the specified index overflow the ring? */ +#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ + (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) + +/* Ill-behaved frontend determination: Can there be this many requests? */ +#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \ + (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r)) + +#define RING_PUSH_REQUESTS(_r) do { \ + xen_wmb(); /* back sees requests /before/ updated producer index */ \ + (_r)->sring->req_prod = (_r)->req_prod_pvt; \ +} while (0) + +#define RING_PUSH_RESPONSES(_r) do { \ + xen_wmb(); /* front sees resps /before/ updated producer index */ \ + (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ +} while (0) + +/* + * Notification hold-off (req_event and rsp_event): + * + * When queueing requests or responses on a shared ring, it may not always be + * necessary to notify the remote end. 
For example, if requests are in flight + * in a backend, the front may be able to queue further requests without + * notifying the back (if the back checks for new requests when it queues + * responses). + * + * When enqueuing requests or responses: + * + * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument + * is a boolean return value. True indicates that the receiver requires an + * asynchronous notification. + * + * After dequeuing requests or responses (before sleeping the connection): + * + * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). + * The second argument is a boolean return value. True indicates that there + * are pending messages on the ring (i.e., the connection should not be put + * to sleep). + * + * These macros will set the req_event/rsp_event field to trigger a + * notification on the very next message that is enqueued. If you want to + * create batches of work (i.e., only receive a notification after several + * messages have been enqueued) then you will need to create a customised + * version of the FINAL_CHECK macro in your own code, which sets the event + * field appropriately. + */ + +#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ + RING_IDX __old = (_r)->sring->req_prod; \ + RING_IDX __new = (_r)->req_prod_pvt; \ + xen_wmb(); /* back sees requests /before/ updated producer index */ \ + (_r)->sring->req_prod = __new; \ + xen_mb(); /* back sees new requests /before/ we check req_event */ \ + (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ + (RING_IDX)(__new - __old)); \ +} while (0) + +#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ + RING_IDX __old = (_r)->sring->rsp_prod; \ + RING_IDX __new = (_r)->rsp_prod_pvt; \ + xen_wmb(); /* front sees resps /before/ updated producer index */ \ + (_r)->sring->rsp_prod = __new; \ + xen_mb(); /* front sees new resps /before/ we check rsp_event */ \ + (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ + (RING_IDX)(__new - __old)); \ +} while (0) + +#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ + if (_work_to_do) break; \ + (_r)->sring->req_event = (_r)->req_cons + 1; \ + xen_mb(); \ + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ +} while (0) + +#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ + if (_work_to_do) break; \ + (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ + xen_mb(); \ + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ +} while (0) + + +/* + * DEFINE_XEN_FLEX_RING_AND_INTF defines two monodirectional rings and + * functions to check if there is data on the ring, and to read and + * write to them. + * + * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but + * does not define the indexes page. As different protocols can have + * extensions to the basic format, this macro allow them to define their + * own struct. + * + * XEN_FLEX_RING_SIZE + * Convenience macro to calculate the size of one of the two rings + * from the overall order. + * + * $NAME_mask + * Function to apply the size mask to an index, to reduce the index + * within the range [0-size]. + * + * $NAME_read_packet + * Function to read data from the ring. The amount of data to read is + * specified by the "size" argument. + * + * $NAME_write_packet + * Function to write data to the ring. The amount of data to write is + * specified by the "size" argument. 
+ * + * $NAME_get_ring_ptr + * Convenience function that returns a pointer to read/write to the + * ring at the right location. + * + * $NAME_data_intf + * Indexes page, shared between frontend and backend. It also + * contains the array of grant refs. + * + * $NAME_queued + * Function to calculate how many bytes are currently on the ring, + * ready to be read. It can also be used to calculate how much free + * space is currently on the ring (XEN_FLEX_RING_SIZE() - + * $NAME_queued()). + */ + +#ifndef XEN_PAGE_SHIFT +/* The PAGE_SIZE for ring protocols and hypercall interfaces is always + * 4K, regardless of the architecture, and page granularity chosen by + * operating systems. + */ +#define XEN_PAGE_SHIFT 12 +#endif +#define XEN_FLEX_RING_SIZE(order) \ + (1UL << ((order) + XEN_PAGE_SHIFT - 1)) + +#define DEFINE_XEN_FLEX_RING(name) \ +static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size) \ +{ \ + return idx & (ring_size - 1); \ +} \ + \ +static inline unsigned char *name##_get_ring_ptr(unsigned char *buf, \ + RING_IDX idx, \ + RING_IDX ring_size) \ +{ \ + return buf + name##_mask(idx, ring_size); \ +} \ + \ +static inline void name##_read_packet(void *opaque, \ + const unsigned char *buf, \ + size_t size, \ + RING_IDX masked_prod, \ + RING_IDX *masked_cons, \ + RING_IDX ring_size) \ +{ \ + if (*masked_cons < masked_prod || \ + size <= ring_size - *masked_cons) { \ + memcpy(opaque, buf + *masked_cons, size); \ + } else { \ + memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons); \ + memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \ + size - (ring_size - *masked_cons)); \ + } \ + *masked_cons = name##_mask(*masked_cons + size, ring_size); \ +} \ + \ +static inline void name##_write_packet(unsigned char *buf, \ + const void *opaque, \ + size_t size, \ + RING_IDX *masked_prod, \ + RING_IDX masked_cons, \ + RING_IDX ring_size) \ +{ \ + if (*masked_prod < masked_cons || \ + size <= ring_size - *masked_prod) { \ + memcpy(buf + *masked_prod, opaque, size); \ + } else { \ + memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod); \ + memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \ + size - (ring_size - *masked_prod)); \ + } \ + *masked_prod = name##_mask(*masked_prod + size, ring_size); \ +} \ + \ +static inline RING_IDX name##_queued(RING_IDX prod, \ + RING_IDX cons, \ + RING_IDX ring_size) \ +{ \ + RING_IDX size; \ + \ + if (prod == cons) \ + return 0; \ + \ + prod = name##_mask(prod, ring_size); \ + cons = name##_mask(cons, ring_size); \ + \ + if (prod == cons) \ + return ring_size; \ + \ + if (prod > cons) \ + size = prod - cons; \ + else \ + size = ring_size - (cons - prod); \ + return size; \ +} \ + \ +struct name##_data { \ + unsigned char *in; /* half of the allocation */ \ + unsigned char *out; /* half of the allocation */ \ +} + +#define DEFINE_XEN_FLEX_RING_AND_INTF(name) \ +struct name##_data_intf { \ + RING_IDX in_cons, in_prod; \ + \ + uint8_t pad1[56]; \ + \ + RING_IDX out_cons, out_prod; \ + \ + uint8_t pad2[56]; \ + \ + RING_IDX ring_order; \ + grant_ref_t ref[]; \ +}; \ +DEFINE_XEN_FLEX_RING(name) + +#endif /* __XEN_PUBLIC_IO_RING_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h index 09c2ce5170..7efcdaa8fe 100644 --- a/include/hw/xen/xen.h +++ b/include/hw/xen/xen.h @@ -21,6 +21,7 @@ enum xen_mode { extern uint32_t xen_domid; extern enum xen_mode xen_mode; 
+extern bool xen_domid_restrict; extern bool xen_allowed; @@ -43,7 +44,7 @@ void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory); void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, struct MemoryRegion *mr, Error **errp); -void xen_modified_memory(ram_addr_t start, ram_addr_t length); +void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length); void xen_register_framebuffer(struct MemoryRegion *mr); diff --git a/include/hw/xen/xen_backend.h b/include/hw/xen/xen_backend.h index 4f4799a610..852c2ea64c 100644 --- a/include/hw/xen/xen_backend.h +++ b/include/hw/xen/xen_backend.h @@ -14,8 +14,6 @@ OBJECT_CHECK(XenDevice, (obj), TYPE_XENBACKEND) /* variables */ -extern xc_interface *xen_xc; -extern xenforeignmemory_handle *xen_fmem; extern struct xs_handle *xenstore; extern const char *xen_protocol; extern DeviceState *xen_sysdev; @@ -49,6 +47,9 @@ extern struct XenDevOps xen_console_ops; /* xen_console.c */ extern struct XenDevOps xen_kbdmouse_ops; /* xen_framebuffer.c */ extern struct XenDevOps xen_framebuffer_ops; /* xen_framebuffer.c */ extern struct XenDevOps xen_blkdev_ops; /* xen_disk.c */ +#ifdef CONFIG_VIRTFS +extern struct XenDevOps xen_9pfs_ops; /* xen-9p-backend.c */ +#endif extern struct XenDevOps xen_netdev_ops; /* xen_nic.c */ #ifdef CONFIG_USB_LIBUSB extern struct XenDevOps xen_usb_ops; /* xen-usb.c */ diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h index dce76ee162..e00ddd7b5b 100644 --- a/include/hw/xen/xen_common.h +++ b/include/hw/xen/xen_common.h @@ -20,12 +20,14 @@ #include "qemu/queue.h" #include "hw/xen/trace.h" +extern xc_interface *xen_xc; + /* * We don't support Xen prior to 4.2.0. */ /* Xen 4.2 through 4.6 */ -#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471 +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701 typedef xc_interface xenforeignmemory_handle; typedef xc_evtchn xenevtchn_handle; @@ -51,6 +53,7 @@ typedef xc_gnttab xengnttab_handle; xc_gnttab_map_domain_grant_refs(h, c, d, r, p) #define xenforeignmemory_open(l, f) xen_xc +#define xenforeignmemory_close(h) static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom, int prot, size_t pages, @@ -65,7 +68,7 @@ static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom, #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE) -#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 471 */ +#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */ #include <xenevtchn.h> #include <xengnttab.h> @@ -73,6 +76,230 @@ static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom, #endif +extern xenforeignmemory_handle *xen_fmem; + +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900 + +typedef xc_interface xendevicemodel_handle; + +static inline xendevicemodel_handle *xendevicemodel_open( + struct xentoollog_logger *logger, unsigned int open_flags) +{ + return xen_xc; +} + +#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 + +static inline int xendevicemodel_create_ioreq_server( + xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq, + ioservid_t *id) +{ + return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq, + id); +} + +static inline int xendevicemodel_get_ioreq_server_info( + xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, + xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn, + evtchn_port_t *bufioreq_port) +{ + return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn, + bufioreq_pfn, bufioreq_port); +} + +static inline int xendevicemodel_map_io_range_to_ioreq_server( + xendevicemodel_handle *dmod, domid_t domid, ioservid_t 
id, int is_mmio, + uint64_t start, uint64_t end) +{ + return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio, + start, end); +} + +static inline int xendevicemodel_unmap_io_range_from_ioreq_server( + xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio, + uint64_t start, uint64_t end) +{ + return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio, + start, end); +} + +static inline int xendevicemodel_map_pcidev_to_ioreq_server( + xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, + uint16_t segment, uint8_t bus, uint8_t device, uint8_t function) +{ + return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment, + bus, device, function); +} + +static inline int xendevicemodel_unmap_pcidev_from_ioreq_server( + xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, + uint16_t segment, uint8_t bus, uint8_t device, uint8_t function) +{ + return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment, + bus, device, function); +} + +static inline int xendevicemodel_destroy_ioreq_server( + xendevicemodel_handle *dmod, domid_t domid, ioservid_t id) +{ + return xc_hvm_destroy_ioreq_server(dmod, domid, id); +} + +static inline int xendevicemodel_set_ioreq_server_state( + xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled) +{ + return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled); +} + +#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */ + +static inline int xendevicemodel_set_pci_intx_level( + xendevicemodel_handle *dmod, domid_t domid, uint16_t segment, + uint8_t bus, uint8_t device, uint8_t intx, unsigned int level) +{ + return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device, + intx, level); +} + +static inline int xendevicemodel_set_isa_irq_level( + xendevicemodel_handle *dmod, domid_t domid, uint8_t irq, + unsigned int level) +{ + return xc_hvm_set_isa_irq_level(dmod, domid, irq, level); +} + +static inline int xendevicemodel_set_pci_link_route( + xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq) +{ + return xc_hvm_set_pci_link_route(dmod, domid, link, irq); +} + +static inline int xendevicemodel_inject_msi( + xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr, + uint32_t msi_data) +{ + return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data); +} + +static inline int xendevicemodel_track_dirty_vram( + xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn, + uint32_t nr, unsigned long *dirty_bitmap) +{ + return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr, + dirty_bitmap); +} + +static inline int xendevicemodel_modified_memory( + xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn, + uint32_t nr) +{ + return xc_hvm_modified_memory(dmod, domid, first_pfn, nr); +} + +static inline int xendevicemodel_set_mem_type( + xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type, + uint64_t first_pfn, uint32_t nr) +{ + return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr); +} + +static inline int xendevicemodel_restrict( + xendevicemodel_handle *dmod, domid_t domid) +{ + errno = ENOTTY; + return -1; +} + +static inline int xenforeignmemory_restrict( + xenforeignmemory_handle *fmem, domid_t domid) +{ + errno = ENOTTY; + return -1; +} + +#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */ + +#undef XC_WANT_COMPAT_DEVICEMODEL_API +#include <xendevicemodel.h> + +#endif + +extern xendevicemodel_handle *xen_dmod; + +static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type, + uint64_t first_pfn, uint32_t 
+static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
+                                   uint64_t first_pfn, uint32_t nr)
+{
+    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
+                                       nr);
+}
+
+static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
+                                         uint8_t bus, uint8_t device,
+                                         uint8_t intx, unsigned int level)
+{
+    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
+                                             device, intx, level);
+}
+
+static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
+                                         uint8_t irq)
+{
+    return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
+}
+
+static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
+                                 uint32_t msi_data)
+{
+    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
+}
+
+static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
+                                        unsigned int level)
+{
+    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
+}
+
+static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
+                                       uint32_t nr, unsigned long *bitmap)
+{
+    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
+                                           bitmap);
+}
+
+static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
+                                      uint32_t nr)
+{
+    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
+}
+
+static inline int xen_restrict(domid_t domid)
+{
+    int rc;
+
+    /* Attempt to restrict devicemodel operations */
+    rc = xendevicemodel_restrict(xen_dmod, domid);
+    trace_xen_domid_restrict(rc ? errno : 0);
+
+    if (rc < 0) {
+        /*
+         * If errno is ENOTTY then restriction is not implemented so
+         * there's no point in trying to restrict other types of
+         * operation, but it should not be treated as a failure.
+         */
+        if (errno == ENOTTY) {
+            return 0;
+        }
+
+        return rc;
+    }
+
+    /* Restrict foreignmemory operations */
+    rc = xenforeignmemory_restrict(xen_fmem, domid);
+    trace_xen_domid_restrict(rc ? errno : 0);
+
+    return rc;
+}
+
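A note on naming: the domid-based xen_modified_memory() wrapper above takes over the name previously used by QEMU's RAM-address-based function, which appears to be why that function is renamed to xen_hvm_modified_memory() at the top of this patch (and in the stub further down). The two declarations, side by side, purely for comparison; both appear elsewhere in the patch:

    /* Shown only to make the rename's motivation explicit. */
    int xen_modified_memory(domid_t domid, uint64_t first_pfn, uint32_t nr);
    void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length);
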
 void destroy_hvm_domain(bool reboot);
 
 /* shutdown/destroy current domain because of an error */
@@ -99,7 +326,7 @@ static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
 #endif
 
 /* Xen before 4.6 */
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
 
 #ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
 #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
@@ -107,8 +334,7 @@ static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
 
 #endif
 
-static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
-                                                    domid_t dom,
+static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                     xen_pfn_t *ioreq_pfn,
                                                     xen_pfn_t *bufioreq_pfn,
                                                     evtchn_port_t
@@ -117,7 +343,7 @@ static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
     unsigned long param;
     int rc;
 
-    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param);
+    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
     if (rc < 0) {
         fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
         return -1;
@@ -125,7 +351,7 @@ static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
 
     *ioreq_pfn = param;
 
-    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
+    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
     if (rc < 0) {
         fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
         return -1;
@@ -133,7 +359,7 @@ static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
 
     *bufioreq_pfn = param;
 
-    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
+    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                           &param);
     if (rc < 0) {
         fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
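For context, a hedged usage sketch: when no ioreq-server API exists, or when the default server is in use, QEMU discovers the shared ioreq pages and the buffered-ioreq event channel from these legacy HVM params instead of querying a server. The function is real; the wrapper around it is hypothetical:

    /* Hypothetical caller, illustrating the lookup performed above. */
    static int example_lookup(domid_t dom)
    {
        xen_pfn_t ioreq_pfn, bufioreq_pfn;
        evtchn_port_t bufioreq_evtchn;

        return xen_get_default_ioreq_server_info(dom, &ioreq_pfn,
                                                 &bufioreq_pfn,
                                                 &bufioreq_evtchn);
    }
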
@@ -146,7 +372,7 @@ static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
 }
 
 /* Xen before 4.5 */
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500
 
 #ifndef HVM_PARAM_BUFIOREQ_EVTCHN
 #define HVM_PARAM_BUFIOREQ_EVTCHN 26
@@ -156,63 +382,64 @@ static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
 
 typedef uint16_t ioservid_t;
 
-static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
+static inline void xen_map_memory_section(domid_t dom,
                                           ioservid_t ioservid,
                                           MemoryRegionSection *section)
 {
 }
 
-static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
+static inline void xen_unmap_memory_section(domid_t dom,
                                             ioservid_t ioservid,
                                             MemoryRegionSection *section)
 {
 }
 
-static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
+static inline void xen_map_io_section(domid_t dom,
                                       ioservid_t ioservid,
                                       MemoryRegionSection *section)
 {
 }
 
-static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
+static inline void xen_unmap_io_section(domid_t dom,
                                         ioservid_t ioservid,
                                         MemoryRegionSection *section)
 {
 }
 
-static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
+static inline void xen_map_pcidev(domid_t dom,
                                   ioservid_t ioservid,
                                   PCIDevice *pci_dev)
 {
 }
 
-static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
+static inline void xen_unmap_pcidev(domid_t dom,
                                     ioservid_t ioservid,
                                     PCIDevice *pci_dev)
 {
 }
 
-static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom,
+static inline void xen_create_ioreq_server(domid_t dom,
                                            ioservid_t *ioservid)
 {
 }
 
-static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
+static inline void xen_destroy_ioreq_server(domid_t dom,
                                             ioservid_t ioservid)
 {
 }
 
-static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
+static inline int xen_get_ioreq_server_info(domid_t dom,
                                             ioservid_t ioservid,
                                             xen_pfn_t *ioreq_pfn,
                                             xen_pfn_t *bufioreq_pfn,
                                             evtchn_port_t *bufioreq_evtchn)
 {
-    return xen_get_default_ioreq_server_info(xc, dom, ioreq_pfn, bufioreq_pfn,
+    return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
+                                             bufioreq_pfn,
                                              bufioreq_evtchn);
 }
 
-static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
+static inline int xen_set_ioreq_server_state(domid_t dom,
                                              ioservid_t ioservid,
                                              bool enable)
 {
@@ -224,7 +451,7 @@ static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
 
 static bool use_default_ioreq_server;
 
-static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
+static inline void xen_map_memory_section(domid_t dom,
                                           ioservid_t ioservid,
                                           MemoryRegionSection *section)
 {
@@ -237,11 +464,11 @@ static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
     }
 
     trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
-    xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1,
-                                        start_addr, end_addr);
+    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
+                                                start_addr, end_addr);
 }
 
-static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
+static inline void xen_unmap_memory_section(domid_t dom,
                                             ioservid_t ioservid,
                                             MemoryRegionSection *section)
 {
@@ -253,13 +480,12 @@ static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
         return;
     }
 
-    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
-    xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1,
-                                            start_addr, end_addr);
+    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
+                                                    1, start_addr, end_addr);
 }
 
-static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
+static inline void xen_map_io_section(domid_t dom,
                                       ioservid_t ioservid,
                                       MemoryRegionSection *section)
 {
@@ -271,13 +497,12 @@ static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
         return;
     }
 
-    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
-    xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0,
-                                        start_addr, end_addr);
+    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
+                                                start_addr, end_addr);
 }
 
-static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
+static inline void xen_unmap_io_section(domid_t dom,
                                         ioservid_t ioservid,
                                         MemoryRegionSection *section)
 {
@@ -290,11 +515,11 @@ static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
     }
 
     trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
-    xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0,
-                                            start_addr, end_addr);
+    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
+                                                    0, start_addr, end_addr);
 }
 
-static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
+static inline void xen_map_pcidev(domid_t dom,
                                   ioservid_t ioservid,
                                   PCIDevice *pci_dev)
 {
@@ -304,13 +529,13 @@ static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
 
     trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                          PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
-    xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid,
-                                      0, pci_bus_num(pci_dev->bus),
-                                      PCI_SLOT(pci_dev->devfn),
-                                      PCI_FUNC(pci_dev->devfn));
+    xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
+                                              pci_bus_num(pci_dev->bus),
+                                              PCI_SLOT(pci_dev->devfn),
+                                              PCI_FUNC(pci_dev->devfn));
 }
 
-static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
+static inline void xen_unmap_pcidev(domid_t dom,
                                     ioservid_t ioservid,
                                     PCIDevice *pci_dev)
 {
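The BDF pieces handed to Xen above come straight from QEMU's packed devfn byte, alongside segment 0 and the bus number. Purely as a reminder (equivalent macros already exist in QEMU's PCI headers; the EXAMPLE_* names are mine):

    /* devfn packs device (bits 7:3) and function (bits 2:0) in one byte. */
    #define EXAMPLE_PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
    #define EXAMPLE_PCI_FUNC(devfn) ((devfn) & 0x07)
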
@@ -320,17 +545,18 @@ static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
 
     trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                            PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
-    xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid,
-                                          0, pci_bus_num(pci_dev->bus),
-                                          PCI_SLOT(pci_dev->devfn),
-                                          PCI_FUNC(pci_dev->devfn));
+    xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
+                                                  pci_bus_num(pci_dev->bus),
+                                                  PCI_SLOT(pci_dev->devfn),
+                                                  PCI_FUNC(pci_dev->devfn));
 }
 
-static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom,
+static inline void xen_create_ioreq_server(domid_t dom,
                                            ioservid_t *ioservid)
 {
-    int rc = xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
-                                        ioservid);
+    int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
+                                                HVM_IOREQSRV_BUFIOREQ_ATOMIC,
+                                                ioservid);
 
     if (rc == 0) {
         trace_xen_ioreq_server_create(*ioservid);
@@ -342,7 +568,7 @@ static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom,
     trace_xen_default_ioreq_server();
 }
 
-static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
+static inline void xen_destroy_ioreq_server(domid_t dom,
                                             ioservid_t ioservid)
 {
     if (use_default_ioreq_server) {
@@ -350,27 +576,27 @@ static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
     }
 
     trace_xen_ioreq_server_destroy(ioservid);
-    xc_hvm_destroy_ioreq_server(xc, dom, ioservid);
+    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
 }
 
-static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
+static inline int xen_get_ioreq_server_info(domid_t dom,
                                             ioservid_t ioservid,
                                             xen_pfn_t *ioreq_pfn,
                                             xen_pfn_t *bufioreq_pfn,
                                             evtchn_port_t *bufioreq_evtchn)
 {
     if (use_default_ioreq_server) {
-        return xen_get_default_ioreq_server_info(xc, dom, ioreq_pfn,
+        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                                  bufioreq_pfn,
                                                  bufioreq_evtchn);
     }
 
-    return xc_hvm_get_ioreq_server_info(xc, dom, ioservid,
-                                        ioreq_pfn, bufioreq_pfn,
-                                        bufioreq_evtchn);
+    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
+                                                ioreq_pfn, bufioreq_pfn,
+                                                bufioreq_evtchn);
 }
 
-static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
+static inline int xen_set_ioreq_server_state(domid_t dom,
                                              ioservid_t ioservid,
                                              bool enable)
 {
@@ -379,12 +605,13 @@ static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
     }
 
     trace_xen_ioreq_server_state(ioservid, enable);
-    return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable);
+    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
+                                                 enable);
 }
 
 #endif
 
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
 static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
                                                unsigned int space,
                                                unsigned long idx,
@@ -407,7 +634,7 @@ static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid
 #endif
 
 #ifdef CONFIG_XEN_PV_DOMAIN_BUILD
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40700
 static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                     xen_domain_handle_t handle, uint32_t flags,
                                     uint32_t *pdomid)
@@ -426,7 +653,7 @@ static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
 
 /* Xen before 4.8 */
 
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 480
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
 
 typedef void *xengnttab_grant_copy_segment_t;
 
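To close out the header changes: the same version-gated fallback idiom recurs for the pre-4.6 add_to_physmap wrapper, the pre-4.7 PV domain_create wrapper, and the pre-4.8 grant-copy placeholder above. Condensed into one hypothetical example (the example_* name is not in the patch):

    /* Hypothetical condensation of the idiom used above. */
    #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
    /* Older libxengnttab: stand-in type so callers still compile. */
    typedef void *example_grant_copy_segment_t;
    #else
    /* Xen 4.8+: the real segment type from <xengnttab.h>. */
    typedef xengnttab_grant_copy_segment_t example_grant_copy_segment_t;
    #endif
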
diff --git a/qemu-options.hx b/qemu-options.hx
index 9171bd5eec..b9a2463919 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -3378,6 +3378,11 @@ DEF("xen-attach", 0, QEMU_OPTION_xen_attach,
     "-xen-attach     attach to existing xen domain\n"
     "                xend will use this when starting QEMU\n",
     QEMU_ARCH_ALL)
+DEF("xen-domid-restrict", 0, QEMU_OPTION_xen_domid_restrict,
+    "-xen-domid-restrict     restrict set of available xen operations\n"
+    "                        to specified domain id. (Does not affect\n"
+    "                        xenpv machine type).\n",
+    QEMU_ARCH_ALL)
 STEXI
 @item -xen-domid @var{id}
 @findex -xen-domid
@@ -3390,6 +3395,8 @@ Warning: should not be used when xend is in use (XEN only).
 @findex -xen-attach
 Attach to existing xen domain.
 xend will use this when starting QEMU (XEN only).
+@findex -xen-domid-restrict
+Restrict set of available xen operations to specified domain id (XEN only).
 ETEXI
 
 DEF("no-reboot", 0, QEMU_OPTION_no_reboot, \
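For illustration only (this invocation is assumed, not taken from the patch): a toolstack that already launches QEMU with "-xen-domid 3 -xen-attach" would append "-xen-domid-restrict" so that the device model, once started, can only issue Xen operations against domain 3.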
diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs
index 224f04ba69..f5b47bfd74 100644
--- a/stubs/Makefile.objs
+++ b/stubs/Makefile.objs
@@ -37,3 +37,5 @@ stub-obj-y += target-monitor-defs.o
 stub-obj-y += target-get-monitor-def.o
 stub-obj-y += pc_madt_cpu_entry.o
 stub-obj-y += vmgenid.o
+stub-obj-y += xen-common.o
+stub-obj-y += xen-hvm.o
diff --git a/xen-common-stub.c b/stubs/xen-common.c
index 09fce2dd36..09fce2dd36 100644
--- a/xen-common-stub.c
+++ b/stubs/xen-common.c
diff --git a/xen-hvm-stub.c b/stubs/xen-hvm.c
index c5003251cb..3ca6c51b21 100644
--- a/xen-hvm-stub.c
+++ b/stubs/xen-hvm.c
@@ -50,7 +50,7 @@ void xen_register_framebuffer(MemoryRegion *mr)
 {
 }
 
-void xen_modified_memory(ram_addr_t start, ram_addr_t length)
+void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
 {
 }
 
diff --git a/trace-events b/trace-events
index b07a09ba95..e582d6315d 100644
--- a/trace-events
+++ b/trace-events
@@ -48,22 +48,6 @@ spice_vmc_register_interface(void *scd) "spice vmc registered interface %p"
 spice_vmc_unregister_interface(void *scd) "spice vmc unregistered interface %p"
 spice_vmc_event(int event) "spice vmc event %d"
 
-# xen-hvm.c
-xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: %#lx, size %#lx"
-xen_client_set_memory(uint64_t start_addr, unsigned long size, bool log_dirty) "%#"PRIx64" size %#lx, log_dirty %i"
-handle_ioreq(void *req, uint32_t type, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p type=%d dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d"
-handle_ioreq_read(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p read type=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d"
-handle_ioreq_write(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p write type=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d"
-cpu_ioreq_pio(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p pio dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d"
-cpu_ioreq_pio_read_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio read reg data=%#"PRIx64" port=%#"PRIx64" size=%d"
-cpu_ioreq_pio_write_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio write reg data=%#"PRIx64" port=%#"PRIx64" size=%d"
-cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p copy dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d"
-
-# xen-mapcache.c
-xen_map_cache(uint64_t phys_addr) "want %#"PRIx64
-xen_remap_bucket(uint64_t index) "index %#"PRIx64
-xen_map_cache_return(void* ptr) "%p"
-
 # monitor.c
 monitor_protocol_event_handler(uint32_t event, void *qdict) "event=%d data=%p"
 monitor_protocol_event_emit(uint32_t event, void *data) "event=%d data=%p"
diff --git a/vl.c b/vl.c
--- a/vl.c
+++ b/vl.c
@@ -205,6 +205,7 @@ static NotifierList machine_init_done_notifiers =
 bool xen_allowed;
 uint32_t xen_domid;
 enum xen_mode xen_mode = XEN_EMULATE;
+bool xen_domid_restrict;
 
 static int has_defaults = 1;
 static int default_serial = 1;
@@ -3933,6 +3934,13 @@ int main(int argc, char **argv, char **envp)
                 }
                 xen_mode = XEN_ATTACH;
                 break;
+            case QEMU_OPTION_xen_domid_restrict:
+                if (!(xen_available())) {
+                    error_report("Option not supported for this target");
+                    exit(1);
+                }
+                xen_domid_restrict = true;
+                break;
             case QEMU_OPTION_trace:
                 g_free(trace_file);
                 trace_file = trace_opt_parse(optarg);
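Not shown in the hunk above is where the new flag is consumed; a minimal sketch, assuming the natural call site is wherever QEMU first opens its Xen handles, and relying on the xen_restrict() helper added earlier in this patch:

    /* Hypothetical wiring of -xen-domid-restrict: example_xen_init() and
     * its placement are assumptions; xen_restrict() and xen_domid are real. */
    static int example_xen_init(void)
    {
        if (xen_domid_restrict && xen_restrict(xen_domid) < 0) {
            return -1;  /* xen_restrict() already treats ENOTTY as success */
        }
        return 0;
    }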