Diffstat (limited to 'include/hw/xen')
 -rw-r--r--   include/hw/xen/io/ring.h      |  482
 -rw-r--r--   include/hw/xen/xen.h          |    3
 -rw-r--r--   include/hw/xen/xen_backend.h  |    5
 -rw-r--r--   include/hw/xen/xen_common.h   |  345
 4 files changed, 773 insertions(+), 62 deletions(-)
diff --git a/include/hw/xen/io/ring.h b/include/hw/xen/io/ring.h
new file mode 100644
index 0000000000..abbca47687
--- /dev/null
+++ b/include/hw/xen/io/ring.h
@@ -0,0 +1,482 @@
+/******************************************************************************
+ * ring.h
+ *
+ * Shared producer-consumer ring macros.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Tim Deegan and Andrew Warfield November 2004.
+ */
+
+#ifndef __XEN_PUBLIC_IO_RING_H__
+#define __XEN_PUBLIC_IO_RING_H__
+
+/*
+ * When #include'ing this header, you need to provide the following
+ * declaration upfront:
+ * - standard integers types (uint8_t, uint16_t, etc)
+ * They are provided by stdint.h of the standard headers.
+ *
+ * In addition, if you intend to use the FLEX macros, you also need to
+ * provide the following, before invoking the FLEX macros:
+ * - size_t
+ * - memcpy
+ * - grant_ref_t
+ * These declarations are provided by string.h of the standard headers,
+ * and grant_table.h from the Xen public headers.
+ */
+
+#if __XEN_INTERFACE_VERSION__ < 0x00030208
+#define xen_mb()  mb()
+#define xen_rmb() rmb()
+#define xen_wmb() wmb()
+#endif
+
+typedef unsigned int RING_IDX;
+
+/* Round a 32-bit unsigned constant down to the nearest power of two. */
+#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
+#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
+#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
+#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
+#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
+
+/*
+ * Calculate size of a shared ring, given the total available space for the
+ * ring and indexes (_sz), and the name tag of the request/response structure.
+ * A ring contains as many entries as will fit, rounded down to the nearest
+ * power of two (so we can mask with (size-1) to loop around).
+ */
+#define __CONST_RING_SIZE(_s, _sz) \
+    (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
+            sizeof(((struct _s##_sring *)0)->ring[0])))
+/*
+ * The same for passing in an actual pointer instead of a name tag.
+ */
+#define __RING_SIZE(_s, _sz) \
+    (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
+
+/*
+ * Macros to make the correct C datatypes for a new kind of ring.
+ *
+ * To make a new ring datatype, you need to have two message structures,
+ * let's say request_t, and response_t already defined.
+ *
+ * In a header where you want the ring datatype declared, you then do:
+ *
+ *     DEFINE_RING_TYPES(mytag, request_t, response_t);
+ *
+ * These expand out to give you a set of types, as you can see below.
+ * The most important of these are:
+ *
+ *     mytag_sring_t      - The shared ring.
+ *     mytag_front_ring_t - The 'front' half of the ring.
+ *     mytag_back_ring_t  - The 'back' half of the ring.
+ *
+ * To initialize a ring in your code you need to know the location and size
+ * of the shared memory area (PAGE_SIZE, for instance). To initialise
+ * the front half:
+ *
+ *     mytag_front_ring_t front_ring;
+ *     SHARED_RING_INIT((mytag_sring_t *)shared_page);
+ *     FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
+ *
+ * Initializing the back follows similarly (note that only the front
+ * initializes the shared ring):
+ *
+ *     mytag_back_ring_t back_ring;
+ *     BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
+ */
+
+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
+ \
+/* Shared ring entry */ \
+union __name##_sring_entry { \
+    __req_t req; \
+    __rsp_t rsp; \
+}; \
+ \
+/* Shared ring page */ \
+struct __name##_sring { \
+    RING_IDX req_prod, req_event; \
+    RING_IDX rsp_prod, rsp_event; \
+    union { \
+        struct { \
+            uint8_t smartpoll_active; \
+        } netif; \
+        struct { \
+            uint8_t msg; \
+        } tapif_user; \
+        uint8_t pvt_pad[4]; \
+    } pvt; \
+    uint8_t __pad[44]; \
+    union __name##_sring_entry ring[1]; /* variable-length */ \
+}; \
+ \
+/* "Front" end's private variables */ \
+struct __name##_front_ring { \
+    RING_IDX req_prod_pvt; \
+    RING_IDX rsp_cons; \
+    unsigned int nr_ents; \
+    struct __name##_sring *sring; \
+}; \
+ \
+/* "Back" end's private variables */ \
+struct __name##_back_ring { \
+    RING_IDX rsp_prod_pvt; \
+    RING_IDX req_cons; \
+    unsigned int nr_ents; \
+    struct __name##_sring *sring; \
+}; \
+ \
+/* Syntactic sugar */ \
+typedef struct __name##_sring __name##_sring_t; \
+typedef struct __name##_front_ring __name##_front_ring_t; \
+typedef struct __name##_back_ring __name##_back_ring_t
+
+/*
+ * Macros for manipulating rings.
+ *
+ * FRONT_RING_whatever works on the "front end" of a ring: here
+ * requests are pushed on to the ring and responses taken off it.
+ *
+ * BACK_RING_whatever works on the "back end" of a ring: here
+ * requests are taken off the ring and responses put on.
+ *
+ * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
+ * This is OK in 1-for-1 request-response situations where the
+ * requestor (front end) never has more than RING_SIZE()-1
+ * outstanding requests.
+ */
+
+/* Initialising empty rings */
+#define SHARED_RING_INIT(_s) do { \
+    (_s)->req_prod = (_s)->rsp_prod = 0; \
+    (_s)->req_event = (_s)->rsp_event = 1; \
+    (void)memset((_s)->pvt.pvt_pad, 0, sizeof((_s)->pvt.pvt_pad)); \
+    (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
+} while(0)
+
+#define FRONT_RING_INIT(_r, _s, __size) do { \
+    (_r)->req_prod_pvt = 0; \
+    (_r)->rsp_cons = 0; \
+    (_r)->nr_ents = __RING_SIZE(_s, __size); \
+    (_r)->sring = (_s); \
+} while (0)
+
+#define BACK_RING_INIT(_r, _s, __size) do { \
+    (_r)->rsp_prod_pvt = 0; \
+    (_r)->req_cons = 0; \
+    (_r)->nr_ents = __RING_SIZE(_s, __size); \
+    (_r)->sring = (_s); \
+} while (0)
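[Editor's note: for orientation, here is a minimal sketch of how a driver pair might use the type and init macros above. Everything named demo_*, plus shared_page and page_size, is hypothetical and appears nowhere in this patch:]

    /* Hypothetical illustration only: the demo_* names and shared_page are
     * made up for this sketch and do not appear in ring.h or this patch. */
    #include <stdint.h>
    #include <string.h>
    #include "hw/xen/io/ring.h"

    typedef struct { uint64_t id; uint64_t sector; } demo_request_t;
    typedef struct { uint64_t id; int16_t  status; } demo_response_t;

    DEFINE_RING_TYPES(demo, demo_request_t, demo_response_t);

    /* In reality the front and back halves live in different domains; both
     * are shown in one function only to demonstrate the init order. */
    void demo_init(void *shared_page, size_t page_size)
    {
        demo_front_ring_t front;
        demo_back_ring_t  back;

        /* Only the frontend initialises the shared page itself... */
        SHARED_RING_INIT((demo_sring_t *)shared_page);
        FRONT_RING_INIT(&front, (demo_sring_t *)shared_page, page_size);

        /* ...the backend just attaches to the already-initialised page. */
        BACK_RING_INIT(&back, (demo_sring_t *)shared_page, page_size);
    }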
+
+/* How big is this ring? */
+#define RING_SIZE(_r) \
+    ((_r)->nr_ents)
+
+/* Number of free requests (for use on front side only). */
+#define RING_FREE_REQUESTS(_r) \
+    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
+
+/* Test if there is an empty slot available on the front ring.
+ * (This is only meaningful from the front. )
+ */
+#define RING_FULL(_r) \
+    (RING_FREE_REQUESTS(_r) == 0)
+
+/* Test if there are outstanding messages to be processed on a ring. */
+#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
+    ((_r)->sring->rsp_prod - (_r)->rsp_cons)
+
+#ifdef __GNUC__
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
+    unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
+    unsigned int rsp = RING_SIZE(_r) - \
+        ((_r)->req_cons - (_r)->rsp_prod_pvt); \
+    req < rsp ? req : rsp; \
+})
+#else
+/* Same as above, but without the nice GCC ({ ... }) syntax. */
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
+    ((((_r)->sring->req_prod - (_r)->req_cons) < \
+      (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \
+     ((_r)->sring->req_prod - (_r)->req_cons) : \
+     (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
+#endif
+
+/* Direct access to individual ring elements, by index. */
+#define RING_GET_REQUEST(_r, _idx) \
+    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
+
+/*
+ * Get a local copy of a request.
+ *
+ * Use this in preference to RING_GET_REQUEST() so all processing is
+ * done on a local copy that cannot be modified by the other end.
+ *
+ * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
+ * to be ineffective where _req is a struct which consists of only bitfields.
+ */
+#define RING_COPY_REQUEST(_r, _idx, _req) do { \
+    /* Use volatile to force the copy into _req. */ \
+    *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
+} while (0)
+
+#define RING_GET_RESPONSE(_r, _idx) \
+    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+
+/* Loop termination condition: Would the specified index overflow the ring? */
+#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
+    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
+
+/* Ill-behaved frontend determination: Can there be this many requests? */
+#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
+    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
+
+#define RING_PUSH_REQUESTS(_r) do { \
+    xen_wmb(); /* back sees requests /before/ updated producer index */ \
+    (_r)->sring->req_prod = (_r)->req_prod_pvt; \
+} while (0)
+
+#define RING_PUSH_RESPONSES(_r) do { \
+    xen_wmb(); /* front sees resps /before/ updated producer index */ \
+    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
+} while (0)
+
+/*
+ * Notification hold-off (req_event and rsp_event):
+ *
+ * When queueing requests or responses on a shared ring, it may not always be
+ * necessary to notify the remote end. For example, if requests are in flight
+ * in a backend, the front may be able to queue further requests without
+ * notifying the back (if the back checks for new requests when it queues
+ * responses).
+ *
+ * When enqueuing requests or responses:
+ *
+ * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
+ * is a boolean return value. True indicates that the receiver requires an
+ * asynchronous notification.
+ *
+ * After dequeuing requests or responses (before sleeping the connection):
+ *
+ * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
+ * The second argument is a boolean return value. True indicates that there
+ * are pending messages on the ring (i.e., the connection should not be put
+ * to sleep).
+ *
+ * These macros will set the req_event/rsp_event field to trigger a
+ * notification on the very next message that is enqueued. If you want to
+ * create batches of work (i.e., only receive a notification after several
+ * messages have been enqueued) then you will need to create a customised
+ * version of the FINAL_CHECK macro in your own code, which sets the event
+ * field appropriately.
+ */
+
+#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
+    RING_IDX __old = (_r)->sring->req_prod; \
+    RING_IDX __new = (_r)->req_prod_pvt; \
+    xen_wmb(); /* back sees requests /before/ updated producer index */ \
+    (_r)->sring->req_prod = __new; \
+    xen_mb(); /* back sees new requests /before/ we check req_event */ \
+    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
+                 (RING_IDX)(__new - __old)); \
+} while (0)
+
+#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
+    RING_IDX __old = (_r)->sring->rsp_prod; \
+    RING_IDX __new = (_r)->rsp_prod_pvt; \
+    xen_wmb(); /* front sees resps /before/ updated producer index */ \
+    (_r)->sring->rsp_prod = __new; \
+    xen_mb(); /* front sees new resps /before/ we check rsp_event */ \
+    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
+                 (RING_IDX)(__new - __old)); \
+} while (0)
+
+#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
+    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
+    if (_work_to_do) break; \
+    (_r)->sring->req_event = (_r)->req_cons + 1; \
+    xen_mb(); \
+    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
+} while (0)
+
+#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
+    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
+    if (_work_to_do) break; \
+    (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
+    xen_mb(); \
+    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
+} while (0)
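[Editor's note: the comment above describes the intended hold-off protocol; a sketch of a backend service loop wired up that way might look as follows, reusing the hypothetical demo ring from the earlier sketch. The event-channel kick is elided:]

    /* Hypothetical backend service loop for the demo ring sketched earlier. */
    void demo_backend_poll(demo_back_ring_t *ring)
    {
        demo_request_t req;
        demo_response_t *rsp;
        int notify, work_to_do;

        do {
            while (RING_HAS_UNCONSUMED_REQUESTS(ring)) {
                /* Copy the request so the frontend cannot modify it while
                 * it is being processed. */
                RING_COPY_REQUEST(ring, ring->req_cons, &req);
                ring->req_cons++;

                rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
                rsp->id = req.id;
                rsp->status = 0;           /* real work would go here */
                ring->rsp_prod_pvt++;
            }

            RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
            if (notify) {
                /* kick the frontend's event channel here */
            }

            /* Re-arm req_event and check once more before going idle. */
            RING_FINAL_CHECK_FOR_REQUESTS(ring, work_to_do);
        } while (work_to_do);
    }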
+
+
+/*
+ * DEFINE_XEN_FLEX_RING_AND_INTF defines two monodirectional rings and
+ * functions to check if there is data on the ring, and to read and
+ * write to them.
+ *
+ * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
+ * does not define the indexes page. As different protocols can have
+ * extensions to the basic format, this macro allow them to define their
+ * own struct.
+ *
+ * XEN_FLEX_RING_SIZE
+ *   Convenience macro to calculate the size of one of the two rings
+ *   from the overall order.
+ *
+ * $NAME_mask
+ *   Function to apply the size mask to an index, to reduce the index
+ *   within the range [0-size].
+ *
+ * $NAME_read_packet
+ *   Function to read data from the ring. The amount of data to read is
+ *   specified by the "size" argument.
+ *
+ * $NAME_write_packet
+ *   Function to write data to the ring. The amount of data to write is
+ *   specified by the "size" argument.
+ *
+ * $NAME_get_ring_ptr
+ *   Convenience function that returns a pointer to read/write to the
+ *   ring at the right location.
+ *
+ * $NAME_data_intf
+ *   Indexes page, shared between frontend and backend. It also
+ *   contains the array of grant refs.
+ *
+ * $NAME_queued
+ *   Function to calculate how many bytes are currently on the ring,
+ *   ready to be read. It can also be used to calculate how much free
+ *   space is currently on the ring (XEN_FLEX_RING_SIZE() -
+ *   $NAME_queued()).
+ */
+
+#ifndef XEN_PAGE_SHIFT
+/* The PAGE_SIZE for ring protocols and hypercall interfaces is always
+ * 4K, regardless of the architecture, and page granularity chosen by
+ * operating systems.
+ */
+#define XEN_PAGE_SHIFT 12
+#endif
+#define XEN_FLEX_RING_SIZE(order) \
+    (1UL << ((order) + XEN_PAGE_SHIFT - 1))
+
+#define DEFINE_XEN_FLEX_RING(name) \
+static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size) \
+{ \
+    return idx & (ring_size - 1); \
+} \
+ \
+static inline unsigned char *name##_get_ring_ptr(unsigned char *buf, \
+                                                 RING_IDX idx, \
+                                                 RING_IDX ring_size) \
+{ \
+    return buf + name##_mask(idx, ring_size); \
+} \
+ \
+static inline void name##_read_packet(void *opaque, \
+                                      const unsigned char *buf, \
+                                      size_t size, \
+                                      RING_IDX masked_prod, \
+                                      RING_IDX *masked_cons, \
+                                      RING_IDX ring_size) \
+{ \
+    if (*masked_cons < masked_prod || \
+        size <= ring_size - *masked_cons) { \
+        memcpy(opaque, buf + *masked_cons, size); \
+    } else { \
+        memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons); \
+        memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \
+               size - (ring_size - *masked_cons)); \
+    } \
+    *masked_cons = name##_mask(*masked_cons + size, ring_size); \
+} \
+ \
+static inline void name##_write_packet(unsigned char *buf, \
+                                       const void *opaque, \
+                                       size_t size, \
+                                       RING_IDX *masked_prod, \
+                                       RING_IDX masked_cons, \
+                                       RING_IDX ring_size) \
+{ \
+    if (*masked_prod < masked_cons || \
+        size <= ring_size - *masked_prod) { \
+        memcpy(buf + *masked_prod, opaque, size); \
+    } else { \
+        memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod); \
+        memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \
+               size - (ring_size - *masked_prod)); \
+    } \
+    *masked_prod = name##_mask(*masked_prod + size, ring_size); \
+} \
+ \
+static inline RING_IDX name##_queued(RING_IDX prod, \
+                                     RING_IDX cons, \
+                                     RING_IDX ring_size) \
+{ \
+    RING_IDX size; \
+ \
+    if (prod == cons) \
+        return 0; \
+ \
+    prod = name##_mask(prod, ring_size); \
+    cons = name##_mask(cons, ring_size); \
+ \
+    if (prod == cons) \
+        return ring_size; \
+ \
+    if (prod > cons) \
+        size = prod - cons; \
+    else \
+        size = ring_size - (cons - prod); \
+    return size; \
+} \
+ \
+struct name##_data { \
+    unsigned char *in; /* half of the allocation */ \
+    unsigned char *out; /* half of the allocation */ \
+}
+
+#define DEFINE_XEN_FLEX_RING_AND_INTF(name) \
+struct name##_data_intf { \
+    RING_IDX in_cons, in_prod; \
+ \
+    uint8_t pad1[56]; \
+ \
+    RING_IDX out_cons, out_prod; \
+ \
+    uint8_t pad2[56]; \
+ \
+    RING_IDX ring_order; \
+    grant_ref_t ref[]; \
+}; \
+DEFINE_XEN_FLEX_RING(name)
+
+#endif /* __XEN_PUBLIC_IO_RING_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
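[Editor's note: a sketch of how a protocol might transmit on the "out" half of a flex ring. The demo9 names and demo9_send() are invented; real users such as the Xen 9pfs backend keep prod/cons in a shared data_intf page and place memory barriers around the index update, which is elided here:]

    /* Hypothetical sender for one direction of a flex ring. */
    DEFINE_XEN_FLEX_RING(demo9);

    static int demo9_send(unsigned char *buf, RING_IDX *prod, RING_IDX cons,
                          int order, const void *data, size_t len)
    {
        RING_IDX size = XEN_FLEX_RING_SIZE(order);
        RING_IDX masked_prod = demo9_mask(*prod, size);
        RING_IDX masked_cons = demo9_mask(cons, size);

        /* Free space is the ring size minus what is already queued. */
        if (len > size - demo9_queued(*prod, cons, size)) {
            return -1;
        }

        demo9_write_packet(buf, data, len, &masked_prod, masked_cons, size);
        *prod += len;   /* a real frontend would xen_wmb() before this */
        return 0;
    }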
diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
index 09c2ce5170..7efcdaa8fe 100644
--- a/include/hw/xen/xen.h
+++ b/include/hw/xen/xen.h
@@ -21,6 +21,7 @@ enum xen_mode {
 
 extern uint32_t xen_domid;
 extern enum xen_mode xen_mode;
+extern bool xen_domid_restrict;
 
 extern bool xen_allowed;
 
@@ -43,7 +44,7 @@ void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory);
 void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
                    struct MemoryRegion *mr, Error **errp);
 
-void xen_modified_memory(ram_addr_t start, ram_addr_t length);
+void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length);
 
 void xen_register_framebuffer(struct MemoryRegion *mr);
diff --git a/include/hw/xen/xen_backend.h b/include/hw/xen/xen_backend.h
index 4f4799a610..852c2ea64c 100644
--- a/include/hw/xen/xen_backend.h
+++ b/include/hw/xen/xen_backend.h
@@ -14,8 +14,6 @@
     OBJECT_CHECK(XenDevice, (obj), TYPE_XENBACKEND)
 
 /* variables */
-extern xc_interface *xen_xc;
-extern xenforeignmemory_handle *xen_fmem;
 extern struct xs_handle *xenstore;
 extern const char *xen_protocol;
 extern DeviceState *xen_sysdev;
@@ -49,6 +47,9 @@ extern struct XenDevOps xen_console_ops;      /* xen_console.c */
 extern struct XenDevOps xen_kbdmouse_ops;     /* xen_framebuffer.c */
 extern struct XenDevOps xen_framebuffer_ops;  /* xen_framebuffer.c */
 extern struct XenDevOps xen_blkdev_ops;       /* xen_disk.c */
+#ifdef CONFIG_VIRTFS
+extern struct XenDevOps xen_9pfs_ops;         /* xen-9p-backend.c */
+#endif
 extern struct XenDevOps xen_netdev_ops;       /* xen_nic.c */
 #ifdef CONFIG_USB_LIBUSB
 extern struct XenDevOps xen_usb_ops;          /* xen-usb.c */
diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h
index dce76ee162..e00ddd7b5b 100644
--- a/include/hw/xen/xen_common.h
+++ b/include/hw/xen/xen_common.h
@@ -20,12 +20,14 @@
 #include "qemu/queue.h"
 #include "hw/xen/trace.h"
 
+extern xc_interface *xen_xc;
+
 /*
  * We don't support Xen prior to 4.2.0.
  */
 
 /* Xen 4.2 through 4.6 */
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
 
 typedef xc_interface xenforeignmemory_handle;
 typedef xc_evtchn xenevtchn_handle;
@@ -51,6 +53,7 @@ typedef xc_gnttab xengnttab_handle;
     xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
 
 #define xenforeignmemory_open(l, f) xen_xc
+#define xenforeignmemory_close(h)
 
 static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
                                          int prot, size_t pages,
@@ -65,7 +68,7 @@ static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
 
 #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
 
-#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 471 */
+#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
 
 #include <xenevtchn.h>
 #include <xengnttab.h>
@@ -73,6 +76,230 @@ static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
 
 #endif
 
+extern xenforeignmemory_handle *xen_fmem;
+
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
+
+typedef xc_interface xendevicemodel_handle;
+
+static inline xendevicemodel_handle *xendevicemodel_open(
+    struct xentoollog_logger *logger, unsigned int open_flags)
+{
+    return xen_xc;
+}
+
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500
+
+static inline int xendevicemodel_create_ioreq_server(
+    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
+    ioservid_t *id)
+{
+    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
+                                      id);
+}
+
+static inline int xendevicemodel_get_ioreq_server_info(
+    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
+    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
+    evtchn_port_t *bufioreq_port)
+{
+    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
+                                        bufioreq_pfn, bufioreq_port);
+}
+
+static inline int xendevicemodel_map_io_range_to_ioreq_server(
+    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
+    uint64_t start, uint64_t end)
+{
+    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
+                                               start, end);
+}
+
+static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
+    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
+    uint64_t start, uint64_t end)
+{
+    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
+                                                   start, end);
+}
+
+static inline int xendevicemodel_map_pcidev_to_ioreq_server(
+    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
+    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
+{
+    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
+                                             bus, device, function);
+}
+
+static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
+    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
+    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
+{
+    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
+                                                 bus, device, function);
+}
+
+static inline int xendevicemodel_destroy_ioreq_server(
+    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
+{
+    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
+}
+
+static inline int xendevicemodel_set_ioreq_server_state(
+    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
+{
+    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
+}
+
+#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
+
+static inline int xendevicemodel_set_pci_intx_level(
+    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
+    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
+{
+    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
+                                     intx, level);
+}
+
+static inline int xendevicemodel_set_isa_irq_level(
+    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
+    unsigned int level)
+{
+    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
+}
+
+static inline int xendevicemodel_set_pci_link_route(
+    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
+{
+    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
+}
+
+static inline int xendevicemodel_inject_msi(
+    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
+    uint32_t msi_data)
+{
+    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
+}
+
+static inline int xendevicemodel_track_dirty_vram(
+    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
+    uint32_t nr, unsigned long *dirty_bitmap)
+{
+    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
+                                   dirty_bitmap);
+}
+
+static inline int xendevicemodel_modified_memory(
+    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
+    uint32_t nr)
+{
+    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
+}
+
+static inline int xendevicemodel_set_mem_type(
+    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
+    uint64_t first_pfn, uint32_t nr)
+{
+    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
+}
+
+static inline int xendevicemodel_restrict(
+    xendevicemodel_handle *dmod, domid_t domid)
+{
+    errno = ENOTTY;
+    return -1;
+}
+
+static inline int xenforeignmemory_restrict(
+    xenforeignmemory_handle *fmem, domid_t domid)
+{
+    errno = ENOTTY;
+    return -1;
+}
+
+#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */
+
+#undef XC_WANT_COMPAT_DEVICEMODEL_API
+#include <xendevicemodel.h>
+
+#endif
+
+extern xendevicemodel_handle *xen_dmod;
+
+static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
+                                   uint64_t first_pfn, uint32_t nr)
+{
+    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
+                                       nr);
+}
+
+static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
+                                         uint8_t bus, uint8_t device,
+                                         uint8_t intx, unsigned int level)
+{
+    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
+                                             device, intx, level);
+}
+
+static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
+                                         uint8_t irq)
+{
+    return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
+}
+
+static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
+                                 uint32_t msi_data)
+{
+    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
+}
+
+static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
+                                        unsigned int level)
+{
+    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
+}
+
+static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
+                                       uint32_t nr, unsigned long *bitmap)
+{
+    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
+                                           bitmap);
+}
+
+static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
+                                      uint32_t nr)
+{
+    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
+}
+
+static inline int xen_restrict(domid_t domid)
+{
+    int rc;
+
+    /* Attempt to restrict devicemodel operations */
+    rc = xendevicemodel_restrict(xen_dmod, domid);
+    trace_xen_domid_restrict(rc ? errno : 0);
+
+    if (rc < 0) {
+        /*
+         * If errno is ENOTTY then restriction is not implemented so
+         * there's no point in trying to restrict other types of
+         * operation, but it should not be treated as a failure.
+         */
+        if (errno == ENOTTY) {
+            return 0;
+        }
+
+        return rc;
+    }
+
+    /* Restrict foreignmemory operations */
+    rc = xenforeignmemory_restrict(xen_fmem, domid);
+    trace_xen_domid_restrict(rc ? errno : 0);
+
+    return rc;
+}
+
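[Editor's note: a sketch of the intended call pattern for the new xen_restrict() above. xen_domid_restrict is the flag declared in xen.h earlier in this diff; the wrapper function and its error reporting are illustrative only, not QEMU's actual call site:]

    /* Hypothetical: apply restriction once all Xen handles are open. */
    static int demo_maybe_restrict(domid_t domid)
    {
        if (!xen_domid_restrict) {
            return 0;               /* restriction not requested */
        }
        if (xen_restrict(domid) < 0) {
            fprintf(stderr, "failed to restrict to domid %u: %s\n",
                    domid, strerror(errno));
            return -1;
        }
        return 0;                   /* restricted, or ENOTTY (no support) */
    }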
 void destroy_hvm_domain(bool reboot);
 
 /* shutdown/destroy current domain because of an error */
@@ -99,7 +326,7 @@ static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
 #endif
 
 /* Xen before 4.6 */
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
 
 #ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
 #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
@@ -107,8 +334,7 @@ static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
 
 #endif
 
-static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
-                                                    domid_t dom,
+static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                     xen_pfn_t *ioreq_pfn,
                                                     xen_pfn_t *bufioreq_pfn,
                                                     evtchn_port_t
@@ -117,7 +343,7 @@ static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
     unsigned long param;
     int rc;
 
-    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param);
+    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
     if (rc < 0) {
         fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
         return -1;
@@ -125,7 +351,7 @@ static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
 
     *ioreq_pfn = param;
 
-    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
+    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
     if (rc < 0) {
         fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
         return -1;
@@ -133,7 +359,7 @@ static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
 
     *bufioreq_pfn = param;
 
-    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
+    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                           &param);
     if (rc < 0) {
         fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
@@ -146,7 +372,7 @@ static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
 }
 
 /* Xen before 4.5 */
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500
 
 #ifndef HVM_PARAM_BUFIOREQ_EVTCHN
 #define HVM_PARAM_BUFIOREQ_EVTCHN 26
@@ -156,63 +382,64 @@ static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
 
 typedef uint16_t ioservid_t;
 
-static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
+static inline void xen_map_memory_section(domid_t dom,
                                           ioservid_t ioservid,
                                           MemoryRegionSection *section)
 {
 }
 
-static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
+static inline void xen_unmap_memory_section(domid_t dom,
                                             ioservid_t ioservid,
                                             MemoryRegionSection *section)
 {
 }
 
-static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
+static inline void xen_map_io_section(domid_t dom,
                                       ioservid_t ioservid,
                                       MemoryRegionSection *section)
 {
 }
 
-static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
+static inline void xen_unmap_io_section(domid_t dom,
                                         ioservid_t ioservid,
                                         MemoryRegionSection *section)
 {
 }
 
-static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
+static inline void xen_map_pcidev(domid_t dom,
                                   ioservid_t ioservid,
                                   PCIDevice *pci_dev)
 {
 }
 
-static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
+static inline void xen_unmap_pcidev(domid_t dom,
                                     ioservid_t ioservid,
                                     PCIDevice *pci_dev)
 {
 }
 
-static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom,
+static inline void xen_create_ioreq_server(domid_t dom,
                                            ioservid_t *ioservid)
 {
 }
 
-static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
+static inline void xen_destroy_ioreq_server(domid_t dom,
                                             ioservid_t ioservid)
 {
 }
 
-static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
+static inline int xen_get_ioreq_server_info(domid_t dom,
                                             ioservid_t ioservid,
                                             xen_pfn_t *ioreq_pfn,
                                             xen_pfn_t *bufioreq_pfn,
                                             evtchn_port_t *bufioreq_evtchn)
 {
-    return xen_get_default_ioreq_server_info(xc, dom, ioreq_pfn, bufioreq_pfn,
+    return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
+                                             bufioreq_pfn,
                                              bufioreq_evtchn);
 }
 
-static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
+static inline int xen_set_ioreq_server_state(domid_t dom,
                                              ioservid_t ioservid,
                                              bool enable)
 {
@@ -224,7 +451,7 @@ static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
 
 static bool use_default_ioreq_server;
 
-static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
+static inline void xen_map_memory_section(domid_t dom,
                                           ioservid_t ioservid,
                                           MemoryRegionSection *section)
 {
@@ -237,11 +464,11 @@ static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
     }
 
     trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
-    xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1,
-                                        start_addr, end_addr);
+    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
+                                                start_addr, end_addr);
 }
 
-static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
+static inline void xen_unmap_memory_section(domid_t dom,
                                             ioservid_t ioservid,
                                             MemoryRegionSection *section)
 {
@@ -253,13 +480,12 @@ static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
         return;
     }
 
-    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
-    xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1,
-                                            start_addr, end_addr);
+    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
+                                                    1, start_addr, end_addr);
 }
 
-static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
+static inline void xen_map_io_section(domid_t dom,
                                       ioservid_t ioservid,
                                       MemoryRegionSection *section)
 {
@@ -271,13 +497,12 @@ static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
         return;
    }
 
-    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
-    xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0,
-                                        start_addr, end_addr);
+    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
+                                                start_addr, end_addr);
 }
 
-static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
+static inline void xen_unmap_io_section(domid_t dom,
                                         ioservid_t ioservid,
                                         MemoryRegionSection *section)
 {
@@ -290,11 +515,11 @@ static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
     }
 
     trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
-    xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0,
-                                            start_addr, end_addr);
+    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
+                                                    0, start_addr, end_addr);
 }
 
-static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
+static inline void xen_map_pcidev(domid_t dom,
                                   ioservid_t ioservid,
                                   PCIDevice *pci_dev)
 {
@@ -304,13 +529,13 @@ static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
 
     trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                          PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
-    xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid,
-                                      0, pci_bus_num(pci_dev->bus),
-                                      PCI_SLOT(pci_dev->devfn),
-                                      PCI_FUNC(pci_dev->devfn));
+    xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
+                                              pci_bus_num(pci_dev->bus),
+                                              PCI_SLOT(pci_dev->devfn),
+                                              PCI_FUNC(pci_dev->devfn));
 }
 
-static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
+static inline void xen_unmap_pcidev(domid_t dom,
                                     ioservid_t ioservid,
                                     PCIDevice *pci_dev)
 {
@@ -320,17 +545,18 @@ static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
 
     trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                            PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
-    xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid,
-                                          0, pci_bus_num(pci_dev->bus),
-                                          PCI_SLOT(pci_dev->devfn),
-                                          PCI_FUNC(pci_dev->devfn));
+    xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
+                                                  pci_bus_num(pci_dev->bus),
+                                                  PCI_SLOT(pci_dev->devfn),
+                                                  PCI_FUNC(pci_dev->devfn));
 }
 
-static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom,
+static inline void xen_create_ioreq_server(domid_t dom,
                                            ioservid_t *ioservid)
 {
-    int rc = xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
-                                        ioservid);
+    int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
+                                                HVM_IOREQSRV_BUFIOREQ_ATOMIC,
+                                                ioservid);
 
     if (rc == 0) {
         trace_xen_ioreq_server_create(*ioservid);
@@ -342,7 +568,7 @@ static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom,
     trace_xen_default_ioreq_server();
 }
 
-static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
+static inline void xen_destroy_ioreq_server(domid_t dom,
                                             ioservid_t ioservid)
 {
     if (use_default_ioreq_server) {
@@ -350,27 +576,27 @@ static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
     }
 
     trace_xen_ioreq_server_destroy(ioservid);
-    xc_hvm_destroy_ioreq_server(xc, dom, ioservid);
+    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
 }
 
-static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
+static inline int xen_get_ioreq_server_info(domid_t dom,
                                             ioservid_t ioservid,
                                             xen_pfn_t *ioreq_pfn,
                                             xen_pfn_t *bufioreq_pfn,
                                             evtchn_port_t *bufioreq_evtchn)
 {
     if (use_default_ioreq_server) {
-        return xen_get_default_ioreq_server_info(xc, dom, ioreq_pfn,
+        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                                  bufioreq_pfn,
                                                  bufioreq_evtchn);
     }
 
-    return xc_hvm_get_ioreq_server_info(xc, dom, ioservid,
-                                        ioreq_pfn, bufioreq_pfn,
-                                        bufioreq_evtchn);
+    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
+                                                ioreq_pfn, bufioreq_pfn,
+                                                bufioreq_evtchn);
 }
 
-static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
+static inline int xen_set_ioreq_server_state(domid_t dom,
                                              ioservid_t ioservid,
                                              bool enable)
 {
@@ -379,12 +605,13 @@ static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
     }
 
     trace_xen_ioreq_server_state(ioservid, enable);
-    return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable);
+    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
+                                                 enable);
 }
 
 #endif
 
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
 static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
                                                unsigned int space,
                                                unsigned long idx,
@@ -407,7 +634,7 @@ static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid
 #endif
 
 #ifdef CONFIG_XEN_PV_DOMAIN_BUILD
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40700
 static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                     xen_domain_handle_t handle, uint32_t flags,
                                     uint32_t *pdomid)
@@ -426,7 +653,7 @@ static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
 
 /* Xen before 4.8 */
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 480
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
 
 typedef void *xengnttab_grant_copy_segment_t;
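[Editor's note: throughout this diff the version constants in the conditionals change from three digits (450, 460, 470, 471, 480) to five (40500, 40600, 40700, 40701, 40800). The five-digit form appears to encode major * 10000 + minor * 100 + micro, so 4.7.1 becomes 40701 and point releases can be compared without ambiguity. A hypothetical helper, not present in the patch, makes the encoding explicit:]

    /* Hypothetical helper spelling out the apparent version encoding. */
    #define XEN_VER(maj, min, mic) ((maj) * 10000 + (min) * 100 + (mic))

    #if CONFIG_XEN_CTRL_INTERFACE_VERSION < XEN_VER(4, 7, 1) /* < 40701 */
    /* compat definitions for pre-4.7.1 toolstacks would go here */
    #endif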