| author | David Woodhouse <dwmw@amazon.co.uk> | 2023-01-01 21:31:37 +0000 |
|---|---|---|
| committer | David Woodhouse <dwmw@amazon.co.uk> | 2023-03-07 17:04:30 +0000 |
| commit | c412ba47b2ec4c75e1ef84f39f898cfdec0630ad (patch) | |
| tree | 7141b1df3087e198dbe310e4c783d0a8d541a41a /include/hw/xen/xen_backend_ops.h | |
| parent | b6cacfea0b38300e3ea5fd6d486d5085122554eb (diff) | |
hw/xen: Add gnttab operations to allow redirection to internal emulation
Move the existing code using libxengnttab to xen-operations.c and allow
the operations to be redirected so that we can add emulation of grant
table mapping for backend drivers.
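
To make the redirection concrete, here is a minimal sketch (not code from this patch) of how an ops-table pointer lets callers be switched between the libxengnttab-backed implementation and the built-in emulation. The table names `libxengnttab_backend_ops` and `emu_gnttab_backend_ops` are hypothetical stand-ins for whatever xen-operations.c and the emulation code register; only `xen_gnttab_ops` and the `qemu_xen_gnttab_*()` wrappers appear in this header.

```c
/*
 * Sketch only: illustrates the ops-table redirection described above.
 * libxengnttab_backend_ops and emu_gnttab_backend_ops are hypothetical
 * names; only xen_gnttab_ops and the qemu_xen_gnttab_*() wrappers are
 * defined by this patch.
 */
#include <stdbool.h>

struct gnttab_backend_ops;                        /* from xen_backend_ops.h */
extern struct gnttab_backend_ops *xen_gnttab_ops; /* global dispatch pointer */

extern struct gnttab_backend_ops libxengnttab_backend_ops; /* real Xen path */
extern struct gnttab_backend_ops emu_gnttab_backend_ops;   /* emulated path */

static void example_select_gnttab_backend(bool running_on_xen)
{
    /*
     * Backend drivers only call the qemu_xen_gnttab_*() wrappers, so
     * repointing xen_gnttab_ops is all that is needed to redirect them.
     */
    xen_gnttab_ops = running_on_xen ? &libxengnttab_backend_ops
                                    : &emu_gnttab_backend_ops;
}
```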
In emulation, mapping more than one grant ref to be virtually contiguous
would be fairly difficult. The best way to do it might be to make the
ram_block mappings actually backed by a file (shmem or a deleted file,
perhaps) so that we can have multiple *shared* mappings of it. But that
would be fairly intrusive.
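
As an aside, the multiple *shared* mappings mentioned above are exactly what a file backing would buy: two MAP_SHARED mappings of the same memfd (or deleted file) stay coherent, which anonymous guest RAM cannot offer. A standalone illustration, not part of the patch:

```c
/* Illustration only: two MAP_SHARED views of one memfd see each other's
 * writes, which is what a file-backed ram_block would make possible.
 * Linux-specific; needs _GNU_SOURCE for memfd_create(). */
#define _GNU_SOURCE
#include <assert.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    const size_t size = 4096;
    int fd = memfd_create("guest-ram-demo", 0);
    if (fd < 0 || ftruncate(fd, size) != 0) {
        return 1;
    }

    char *view1 = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    char *view2 = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    assert(view1 != MAP_FAILED && view2 != MAP_FAILED);

    strcpy(view1, "written via the first mapping");
    assert(strcmp(view2, "written via the first mapping") == 0);
    return 0;
}
```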
Making the backend drivers cope with page *lists* instead of expecting
the mapping to be contiguous is also non-trivial, since some structures
would actually *cross* page boundaries (e.g. the 32-bit blkif responses
which are 12 bytes).
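
For a concrete feel for the problem, the arithmetic below (illustrative only, assuming 12-byte responses packed from the start of a 4 KiB Xen page) shows where such a response would straddle a page boundary in a multi-page ring:

```c
/* Illustrative arithmetic for the page-crossing problem; not patch code.
 * Assumes 12-byte entries packed from offset 0 of a 4096-byte Xen page. */
#include <stdio.h>

#define XEN_PAGE_SIZE  4096u
#define BLKIF_32_RSP   12u   /* "32-bit blkif responses which are 12 bytes" */

int main(void)
{
    unsigned per_page  = XEN_PAGE_SIZE / BLKIF_32_RSP;  /* 341 whole entries */
    unsigned leftover  = XEN_PAGE_SIZE % BLKIF_32_RSP;  /* 4 bytes spare     */
    unsigned start_off = per_page * BLKIF_32_RSP;       /* 4092              */

    /* Entry 341 occupies bytes 4092..4103: 4 bytes in this page and 8 in
     * the next, so a backend given separately mapped pages would have to
     * stitch that response back together from two buffers. */
    printf("%u entries fit, %u bytes left over; entry %u starts at offset %u "
           "and crosses into the next page\n",
           per_page, leftover, per_page, start_off);
    return 0;
}
```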
So for now, we'll support only single-page mappings in emulation. Add a
XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE flag to indicate that the native Xen
implementation *does* support multi-page maps, and a helper function to
query it.
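
A hedged sketch of how a backend might consult that helper before attempting a multi-page mapping; `try_map_ring()` and its fallback policy are hypothetical, only the feature flag, the capability check and the map wrapper come from this patch:

```c
/* Sketch of a caller of the new capability check; try_map_ring() is a
 * hypothetical backend function, not part of this patch. */
#include <sys/mman.h>
#include "hw/xen/xen_backend_ops.h"

static void *try_map_ring(xengnttab_handle *xgt, uint32_t domid,
                          uint32_t *refs, uint32_t nr_refs)
{
    if (nr_refs > 1 && !qemu_xen_gnttab_can_map_multi()) {
        /* Emulated grant tables only support single-page maps, so the
         * caller must fall back to a one-page ring or grant copies. */
        return NULL;
    }
    return qemu_xen_gnttab_map_refs(xgt, nr_refs, domid, refs,
                                    PROT_READ | PROT_WRITE);
}
```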
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
Diffstat (limited to 'include/hw/xen/xen_backend_ops.h')
| -rw-r--r-- | include/hw/xen/xen_backend_ops.h | 100 |

1 file changed, 100 insertions, 0 deletions
```diff
diff --git a/include/hw/xen/xen_backend_ops.h b/include/hw/xen/xen_backend_ops.h
index 9605456e81..bb3333ab00 100644
--- a/include/hw/xen/xen_backend_ops.h
+++ b/include/hw/xen/xen_backend_ops.h
@@ -29,6 +29,12 @@ typedef struct xenevtchn_handle xenevtchn_handle;
 typedef int xenevtchn_port_or_error_t;
 typedef uint32_t evtchn_port_t;
+typedef uint16_t domid_t;
+typedef uint32_t grant_ref_t;
+
+#define XEN_PAGE_SHIFT 12
+#define XEN_PAGE_SIZE (1UL << XEN_PAGE_SHIFT)
+#define XEN_PAGE_MASK (~(XEN_PAGE_SIZE - 1))
 
 struct evtchn_backend_ops {
     xenevtchn_handle *(*open)(void);
@@ -113,6 +119,100 @@ static inline int qemu_xen_evtchn_pending(xenevtchn_handle *xc)
     return xen_evtchn_ops->pending(xc);
 }
 
+typedef struct xengntdev_handle xengnttab_handle;
+
+typedef struct XenGrantCopySegment {
+    union {
+        void *virt;
+        struct {
+            uint32_t ref;
+            off_t offset;
+        } foreign;
+    } source, dest;
+    size_t len;
+} XenGrantCopySegment;
+
+#define XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE (1U << 0)
+
+struct gnttab_backend_ops {
+    uint32_t features;
+    xengnttab_handle *(*open)(void);
+    int (*close)(xengnttab_handle *xgt);
+    int (*grant_copy)(xengnttab_handle *xgt, bool to_domain, uint32_t domid,
+                      XenGrantCopySegment *segs, uint32_t nr_segs,
+                      Error **errp);
+    int (*set_max_grants)(xengnttab_handle *xgt, uint32_t nr_grants);
+    void *(*map_refs)(xengnttab_handle *xgt, uint32_t count, uint32_t domid,
+                      uint32_t *refs, int prot);
+    int (*unmap)(xengnttab_handle *xgt, void *start_address, uint32_t count);
+};
+
+extern struct gnttab_backend_ops *xen_gnttab_ops;
+
+static inline bool qemu_xen_gnttab_can_map_multi(void)
+{
+    return xen_gnttab_ops &&
+        !!(xen_gnttab_ops->features & XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE);
+}
+
+static inline xengnttab_handle *qemu_xen_gnttab_open(void)
+{
+    if (!xen_gnttab_ops) {
+        return NULL;
+    }
+    return xen_gnttab_ops->open();
+}
+
+static inline int qemu_xen_gnttab_close(xengnttab_handle *xgt)
+{
+    if (!xen_gnttab_ops) {
+        return -ENOSYS;
+    }
+    return xen_gnttab_ops->close(xgt);
+}
+
+static inline int qemu_xen_gnttab_grant_copy(xengnttab_handle *xgt,
+                                             bool to_domain, uint32_t domid,
+                                             XenGrantCopySegment *segs,
+                                             uint32_t nr_segs, Error **errp)
+{
+    if (!xen_gnttab_ops) {
+        return -ENOSYS;
+    }
+
+    return xen_gnttab_ops->grant_copy(xgt, to_domain, domid, segs, nr_segs,
+                                      errp);
+}
+
+static inline int qemu_xen_gnttab_set_max_grants(xengnttab_handle *xgt,
+                                                 uint32_t nr_grants)
+{
+    if (!xen_gnttab_ops) {
+        return -ENOSYS;
+    }
+    return xen_gnttab_ops->set_max_grants(xgt, nr_grants);
+}
+
+static inline void *qemu_xen_gnttab_map_refs(xengnttab_handle *xgt,
+                                             uint32_t count, uint32_t domid,
+                                             uint32_t *refs, int prot)
+{
+    if (!xen_gnttab_ops) {
+        return NULL;
+    }
+    return xen_gnttab_ops->map_refs(xgt, count, domid, refs, prot);
+}
+
+static inline int qemu_xen_gnttab_unmap(xengnttab_handle *xgt,
+                                        void *start_address,
+                                        uint32_t count)
+{
+    if (!xen_gnttab_ops) {
+        return -ENOSYS;
+    }
+    return xen_gnttab_ops->unmap(xgt, start_address, count);
+}
+
 void setup_xen_backend_ops(void);
 
 #endif /* QEMU_XEN_BACKEND_OPS_H */
```
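
For context, a typical call sequence through the wrappers above, as a backend driver might use them for a single-page ring. This is a usage sketch, not code from the patch; the helper names are hypothetical and error handling is abbreviated.

```c
/* Usage sketch only: map and unmap one granted ring page through the
 * qemu_xen_gnttab_*() wrappers added by this patch. */
#include <sys/mman.h>
#include "hw/xen/xen_backend_ops.h"

static void *map_single_ring_page(uint32_t domid, grant_ref_t ref,
                                  xengnttab_handle **xgtp)
{
    xengnttab_handle *xgt = qemu_xen_gnttab_open();
    if (!xgt) {
        return NULL;   /* no gnttab backend registered */
    }

    /* One grant ref is enough here, so this works with both the
     * libxengnttab backend and the single-page-only emulation. */
    void *ring = qemu_xen_gnttab_map_refs(xgt, 1, domid, &ref,
                                          PROT_READ | PROT_WRITE);
    if (!ring) {
        qemu_xen_gnttab_close(xgt);
        return NULL;
    }

    *xgtp = xgt;
    return ring;
}

static void unmap_single_ring_page(xengnttab_handle *xgt, void *ring)
{
    qemu_xen_gnttab_unmap(xgt, ring, 1);
    qemu_xen_gnttab_close(xgt);
}
```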