author    Stefan Hajnoczi <stefanha@redhat.com>  2019-06-26 08:48:13 +0100
committer Michael S. Tsirkin <mst@redhat.com>    2019-07-04 17:00:32 -0400
commit    6f5fd837889814e57a4bb473bf80ce08e355a12d (patch)
tree      94a9980f0db7259a68621d91d65cc5a83477663d /contrib/libvhost-user/libvhost-user.c
parent    db68f4ff06cbe0517ed0d9b5634f6cddaed2547c (diff)
libvhost-user: support many virtqueues
Currently libvhost-user is hardcoded to at most 8 virtqueues. The device
backend should decide the number of virtqueues, not libvhost-user. This is
important for multiqueue device backends where the guest driver needs an
accurate number of virtqueues.

This change breaks libvhost-user and libvhost-user-glib API stability.
There is no stability guarantee yet, so make this change now and update
all in-tree library users.

This patch touches up vhost-user-blk, vhost-user-gpu, vhost-user-input,
vhost-user-scsi, and vhost-user-bridge. If the device has a fixed number
of queues, that exact number is used. Otherwise the previous default of
8 virtqueues is used.

vu_init() and vug_init() can now fail if malloc() returns NULL. I
considered aborting with an error in libvhost-user, but it should be safe
to instantiate new vhost-user instances at runtime without risk of
terminating the process. Therefore callers need to handle the vu_init()
failure now.

vhost-user-blk and vhost-user-scsi duplicate virtqueue index checks that
are already performed by libvhost-user. This code would need to be
modified to use max_queues, but it is removed completely instead since
it is redundant.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-Id: <20190626074815.19994-3-stefanha@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
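For illustration, a caller under the new API contract might look like the
sketch below. Everything here apart from the vu_init() signature itself
(the NUM_QUEUES value, the callback stubs, and my_iface) is a hypothetical
placeholder, not part of this patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "libvhost-user.h"

#define NUM_QUEUES 4 /* hypothetical: a real backend picks its own count */

/* Placeholder callbacks; a real backend wires these into its event loop. */
static void panic_cb(VuDev *dev, const char *msg)
{
    fprintf(stderr, "vhost-user panic: %s\n", msg);
}

static void set_watch_cb(VuDev *dev, int fd, int condition,
                         vu_watch_cb cb, void *data)
{
    /* hypothetical: register fd with the backend's event loop */
}

static void remove_watch_cb(VuDev *dev, int fd)
{
    /* hypothetical: unregister fd from the backend's event loop */
}

static uint64_t get_features_cb(VuDev *dev)
{
    return 0; /* hypothetical: no device-specific feature bits */
}

static const VuDevIface my_iface = {
    .get_features = get_features_cb,
};

static bool start_backend(VuDev *dev, int conn_fd)
{
    /* vu_init() now returns false instead of aborting when it cannot
     * allocate the virtqueue array, so the caller must check it. */
    if (!vu_init(dev, NUM_QUEUES, conn_fd, panic_cb,
                 set_watch_cb, remove_watch_cb, &my_iface)) {
        fprintf(stderr, "vu_init() failed\n");
        return false;
    }
    return true;
}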
Diffstat (limited to 'contrib/libvhost-user/libvhost-user.c')
-rw-r--r--  contrib/libvhost-user/libvhost-user.c  33
1 file changed, 24 insertions, 9 deletions
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index a8657c7af2..90bea856dd 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -493,9 +493,9 @@ vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
 static void
 vu_set_enable_all_rings(VuDev *dev, bool enabled)
 {
-    int i;
+    uint16_t i;
 
-    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
+    for (i = 0; i < dev->max_queues; i++) {
         dev->vq[i].enable = enabled;
     }
 }
@@ -916,7 +916,7 @@ vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
 {
     int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
 
-    if (index >= VHOST_MAX_NR_VIRTQUEUE) {
+    if (index >= dev->max_queues) {
         vmsg_close_fds(vmsg);
         vu_panic(dev, "Invalid queue index: %u", index);
         return false;
@@ -1213,7 +1213,7 @@ vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
     DPRINT("State.index: %d\n", index);
     DPRINT("State.enable: %d\n", enable);
 
-    if (index >= VHOST_MAX_NR_VIRTQUEUE) {
+    if (index >= dev->max_queues) {
         vu_panic(dev, "Invalid vring_enable index: %u", index);
         return false;
     }
@@ -1582,7 +1582,7 @@ vu_deinit(VuDev *dev)
     }
     dev->nregions = 0;
 
-    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
+    for (i = 0; i < dev->max_queues; i++) {
         VuVirtq *vq = &dev->vq[i];
 
         if (vq->call_fd != -1) {
@@ -1627,18 +1627,23 @@ vu_deinit(VuDev *dev)
     if (dev->sock != -1) {
         close(dev->sock);
     }
+
+    free(dev->vq);
+    dev->vq = NULL;
 }
 
-void
+bool
 vu_init(VuDev *dev,
+        uint16_t max_queues,
         int socket,
         vu_panic_cb panic,
         vu_set_watch_cb set_watch,
         vu_remove_watch_cb remove_watch,
         const VuDevIface *iface)
 {
-    int i;
+    uint16_t i;
 
+    assert(max_queues > 0);
     assert(socket >= 0);
     assert(set_watch);
     assert(remove_watch);
@@ -1654,18 +1659,28 @@ vu_init(VuDev *dev,
     dev->iface = iface;
     dev->log_call_fd = -1;
     dev->slave_fd = -1;
-    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
+    dev->max_queues = max_queues;
+
+    dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
+    if (!dev->vq) {
+        DPRINT("%s: failed to malloc virtqueues\n", __func__);
+        return false;
+    }
+
+    for (i = 0; i < max_queues; i++) {
         dev->vq[i] = (VuVirtq) {
             .call_fd = -1, .kick_fd = -1, .err_fd = -1,
             .notification = true,
         };
     }
+
+    return true;
 }
 
 VuVirtq *
 vu_get_queue(VuDev *dev, int qidx)
 {
-    assert(qidx < VHOST_MAX_NR_VIRTQUEUE);
+    assert(qidx < dev->max_queues);
     return &dev->vq[qidx];
 }
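One consequence worth noting: backend code that used to size loops with the
fixed VHOST_MAX_NR_VIRTQUEUE bound should now use the per-device
dev->max_queues. A minimal sketch, assuming a VuDev already set up through
the new vu_init(); the dump_queue_state() helper is hypothetical, while
vu_get_queue() and vu_queue_enabled() are existing libvhost-user calls:

#include <stdint.h>
#include <stdio.h>
#include "libvhost-user.h"

/* Hypothetical helper: walk every virtqueue using dev->max_queues,
 * the per-device bound introduced by this patch. */
static void dump_queue_state(VuDev *dev)
{
    uint16_t i;

    for (i = 0; i < dev->max_queues; i++) {
        VuVirtq *vq = vu_get_queue(dev, i);

        printf("vq %u: %s\n", (unsigned)i,
               vu_queue_enabled(dev, vq) ? "enabled" : "disabled");
    }
}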