From 7fe7791e3f652ac31ef98dcc94a8f2a317ab846b Mon Sep 17 00:00:00 2001 From: Laurent Vivier Date: Tue, 19 Oct 2021 09:15:31 +0200 Subject: failover: fix a regression introduced by JSON'ification of -device The hide_device helper can be called several times for the same device, as it shouldn't change any state and should only return information. But to stop relying on QemuOpts, we have introduced a new field to store the parameters of the device, and we don't allow it to be updated once it is set. And as the function is called several times, we end up with: warning: Cannot attach more than one primary device to 'virtio0' This is not only a warning, as it prevents hiding the device and breaks failover. Fix that by checking the device id. Now we fail only if the virtio-net device is really used by two different devices, for instance: -device virtio-net-pci,id=virtio0,failover=on,... \ -device vfio-pci,id=hostdev0,failover_pair_id=virtio0,... \ -device e1000e,id=e1000e0,failover_pair_id=virtio0,... \ will exit with: Cannot attach more than one primary device to 'virtio0': 'hostdev0' and 'e1000e0' Fixes: 259a10dbcb4f ("virtio-net: Store failover primary opts pointer locally") Cc: kwolf@redhat.com Signed-off-by: Laurent Vivier Message-Id: <20211019071532.682717-2-lvivier@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin Reviewed-by: Kevin Wolf --- hw/net/virtio-net.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) (limited to 'hw/net/virtio-net.c') diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 09e173a558..83642c85b2 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -3304,15 +3304,27 @@ static bool failover_hide_primary_device(DeviceListener *listener, return false; } + /* + * The hide helper can be called several times for a given device. + * Check there is only one primary for a virtio-net device but + * don't duplicate the qdict several times if it's called for the same + * device. + */ if (n->primary_opts) { - error_setg(errp, "Cannot attach more than one primary device to '%s'", - n->netclient_name); - return false; + const char *old, *new; + /* devices with failover_pair_id always have an id */ + old = qdict_get_str(n->primary_opts, "id"); + new = qdict_get_str(device_opts, "id"); + if (strcmp(old, new) != 0) { + error_setg(errp, "Cannot attach more than one primary device to " + "'%s': '%s' and '%s'", n->netclient_name, old, new); + return false; + } + } else { + n->primary_opts = qdict_clone_shallow(device_opts); + n->primary_opts_from_json = from_json; } - n->primary_opts = qdict_clone_shallow(device_opts); - n->primary_opts_from_json = from_json; - /* failover_primary_hidden is set during feature negotiation */ return qatomic_read(&n->failover_primary_hidden); } -- cgit v1.2.3
From bcfc906be47a88803307cd6b665dc4c26fdd6dd2 Mon Sep 17 00:00:00 2001 From: Laurent Vivier Date: Tue, 19 Oct 2021 09:15:32 +0200 Subject: qdev/qbus: remove failover specific code Commit f3a850565693 ("qdev/qbus: add hidden device support") introduced a generic way to hide a device, but it modified qdev_device_add() to check a specific option of the failover device, "failover_pair_id", before calling the generic mechanism. It is not needed (and not generic) to do that in qdev_device_add(), because this is also checked by the failover_hide_primary_device() function, which uses the generic mechanism to hide the device. Cc: Jens Freimann Signed-off-by: Laurent Vivier Message-Id: <20211019071532.682717-3-lvivier@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin Reviewed-by: Kevin Wolf --- hw/net/virtio-net.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'hw/net/virtio-net.c') diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 83642c85b2..3dd2896ff9 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -3299,7 +3299,17 @@ static bool failover_hide_primary_device(DeviceListener *listener, if (!device_opts) { return false; } - standby_id = qdict_get_try_str(device_opts, "failover_pair_id"); + + if (!qdict_haskey(device_opts, "failover_pair_id")) { + return false; + } + + if (!qdict_haskey(device_opts, "id")) { + error_setg(errp, "Device with failover_pair_id needs to have id"); + return false; + } + + standby_id = qdict_get_str(device_opts, "failover_pair_id"); if (g_strcmp0(standby_id, n->netclient_name) != 0) { return false; } -- cgit v1.2.3
From 05ba3f63d174a716b42aa31c9af5d0ef64fff515 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 20 Oct 2021 12:55:56 +0800 Subject: vhost-net: control virtqueue support In the past we assumed there was no cvq; this is no longer true when we need control virtqueue support for vhost-user backends. So this patch implements control virtqueue support for vhost-net. As with the datapath, the control virtqueue also needs to be coupled with a NetClientState. vhost_net_start/stop() are tweaked to accept the number of datapath queue pairs plus the number of control virtqueues, so that we can start and stop the vhost device. Signed-off-by: Jason Wang Message-Id: <20211020045600.16082-7-jasowang@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/net/virtio-net.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'hw/net/virtio-net.c') diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 3dd2896ff9..5ee6729662 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -285,14 +285,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) } n->vhost_started = 1; - r = vhost_net_start(vdev, n->nic->ncs, queues); + r = vhost_net_start(vdev, n->nic->ncs, queues, 0); if (r < 0) { error_report("unable to start vhost net: %d: " "falling back on userspace virtio", -r); n->vhost_started = 0; } } else { - vhost_net_stop(vdev, n->nic->ncs, queues); + vhost_net_stop(vdev, n->nic->ncs, queues, 0); n->vhost_started = 0; } } -- cgit v1.2.3
From 441537f1ce0153978b4c9ee1cc4d4152147aa16f Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 20 Oct 2021 12:55:57 +0800 Subject: virtio-net: use "queue_pairs" instead of "queues" when possible Most of the time, "queues" really means queue pairs. So this patch switches to using "queue_pairs" to avoid confusion. Signed-off-by: Jason Wang Message-Id: <20211020045600.16082-8-jasowang@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S.
Tsirkin --- hw/net/virtio-net.c | 150 ++++++++++++++++++++++++++-------------------------- 1 file changed, 75 insertions(+), 75 deletions(-) (limited to 'hw/net/virtio-net.c') diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 5ee6729662..7594f7ea92 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -54,7 +54,7 @@ #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256 #define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256 -/* for now, only allow larger queues; with virtio-1, guest can downsize */ +/* for now, only allow larger queue_pairs; with virtio-1, guest can downsize */ #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE #define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE @@ -131,7 +131,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config) int ret = 0; memset(&netcfg, 0 , sizeof(struct virtio_net_config)); virtio_stw_p(vdev, &netcfg.status, n->status); - virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues); + virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queue_pairs); virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu); memcpy(netcfg.mac, n->mac, ETH_ALEN); virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed); @@ -243,7 +243,7 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) { VirtIODevice *vdev = VIRTIO_DEVICE(n); NetClientState *nc = qemu_get_queue(n->nic); - int queues = n->multiqueue ? n->max_queues : 1; + int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; if (!get_vhost_net(nc->peer)) { return; @@ -266,7 +266,7 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) /* Any packets outstanding? Purge them to avoid touching rings * when vhost is running. */ - for (i = 0; i < queues; i++) { + for (i = 0; i < queue_pairs; i++) { NetClientState *qnc = qemu_get_subqueue(n->nic, i); /* Purge both directions: TX and RX. */ @@ -285,14 +285,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) } n->vhost_started = 1; - r = vhost_net_start(vdev, n->nic->ncs, queues, 0); + r = vhost_net_start(vdev, n->nic->ncs, queue_pairs, 0); if (r < 0) { error_report("unable to start vhost net: %d: " "falling back on userspace virtio", -r); n->vhost_started = 0; } } else { - vhost_net_stop(vdev, n->nic->ncs, queues, 0); + vhost_net_stop(vdev, n->nic->ncs, queue_pairs, 0); n->vhost_started = 0; } } @@ -309,11 +309,11 @@ static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev, } static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs, - int queues, bool enable) + int queue_pairs, bool enable) { int i; - for (i = 0; i < queues; i++) { + for (i = 0; i < queue_pairs; i++) { if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 && enable) { while (--i >= 0) { @@ -330,7 +330,7 @@ static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs, static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status) { VirtIODevice *vdev = VIRTIO_DEVICE(n); - int queues = n->multiqueue ? n->max_queues : 1; + int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; if (virtio_net_started(n, status)) { /* Before using the device, we tell the network backend about the @@ -339,14 +339,14 @@ static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status) * virtio-net code. 
*/ n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs, - queues, true); + queue_pairs, true); } else if (virtio_net_started(n, vdev->status)) { /* After using the device, we need to reset the network backend to * the default (guest native endianness), otherwise the guest may * lose network connectivity if it is rebooted into a different * endianness. */ - virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false); + virtio_net_set_vnet_endian(vdev, n->nic->ncs, queue_pairs, false); } } @@ -368,12 +368,12 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status) virtio_net_vnet_endian_status(n, status); virtio_net_vhost_status(n, status); - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { NetClientState *ncs = qemu_get_subqueue(n->nic, i); bool queue_started; q = &n->vqs[i]; - if ((!n->multiqueue && i != 0) || i >= n->curr_queues) { + if ((!n->multiqueue && i != 0) || i >= n->curr_queue_pairs) { queue_status = 0; } else { queue_status = status; @@ -540,7 +540,7 @@ static void virtio_net_reset(VirtIODevice *vdev) n->nouni = 0; n->nobcast = 0; /* multiqueue is disabled by default */ - n->curr_queues = 1; + n->curr_queue_pairs = 1; timer_del(n->announce_timer.tm); n->announce_timer.round = 0; n->status &= ~VIRTIO_NET_S_ANNOUNCE; @@ -556,7 +556,7 @@ static void virtio_net_reset(VirtIODevice *vdev) memset(n->vlans, 0, MAX_VLAN >> 3); /* Flush any async TX */ - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { NetClientState *nc = qemu_get_subqueue(n->nic, i); if (nc->peer) { @@ -610,7 +610,7 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs, sizeof(struct virtio_net_hdr); } - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { nc = qemu_get_subqueue(n->nic, i); if (peer_has_vnet_hdr(n) && @@ -655,7 +655,7 @@ static int peer_attach(VirtIONet *n, int index) return 0; } - if (n->max_queues == 1) { + if (n->max_queue_pairs == 1) { return 0; } @@ -681,7 +681,7 @@ static int peer_detach(VirtIONet *n, int index) return tap_disable(nc->peer); } -static void virtio_net_set_queues(VirtIONet *n) +static void virtio_net_set_queue_pairs(VirtIONet *n) { int i; int r; @@ -690,8 +690,8 @@ static void virtio_net_set_queues(VirtIONet *n) return; } - for (i = 0; i < n->max_queues; i++) { - if (i < n->curr_queues) { + for (i = 0; i < n->max_queue_pairs; i++) { + if (i < n->curr_queue_pairs) { r = peer_attach(n, i); assert(!r); } else { @@ -905,7 +905,7 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features) virtio_net_apply_guest_offloads(n); } - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { NetClientState *nc = qemu_get_subqueue(n->nic, i); if (!get_vhost_net(nc->peer)) { @@ -1232,7 +1232,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, VirtIODevice *vdev = VIRTIO_DEVICE(n); struct virtio_net_rss_config cfg; size_t s, offset = 0, size_get; - uint16_t queues, i; + uint16_t queue_pairs, i; struct { uint16_t us; uint8_t b; @@ -1274,7 +1274,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, } n->rss_data.default_queue = do_rss ? 
virtio_lduw_p(vdev, &cfg.unclassified_queue) : 0; - if (n->rss_data.default_queue >= n->max_queues) { + if (n->rss_data.default_queue >= n->max_queue_pairs) { err_msg = "Invalid default queue"; err_value = n->rss_data.default_queue; goto error; @@ -1303,14 +1303,14 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, size_get = sizeof(temp); s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get); if (s != size_get) { - err_msg = "Can't get queues"; + err_msg = "Can't get queue_pairs"; err_value = (uint32_t)s; goto error; } - queues = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queues; - if (queues == 0 || queues > n->max_queues) { - err_msg = "Invalid number of queues"; - err_value = queues; + queue_pairs = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queue_pairs; + if (queue_pairs == 0 || queue_pairs > n->max_queue_pairs) { + err_msg = "Invalid number of queue_pairs"; + err_value = queue_pairs; goto error; } if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) { @@ -1325,7 +1325,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, } if (!temp.b && !n->rss_data.hash_types) { virtio_net_disable_rss(n); - return queues; + return queue_pairs; } offset += size_get; size_get = temp.b; @@ -1358,7 +1358,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, trace_virtio_net_rss_enable(n->rss_data.hash_types, n->rss_data.indirections_len, temp.b); - return queues; + return queue_pairs; error: trace_virtio_net_rss_error(err_msg, err_value); virtio_net_disable_rss(n); @@ -1369,15 +1369,15 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, struct iovec *iov, unsigned int iov_cnt) { VirtIODevice *vdev = VIRTIO_DEVICE(n); - uint16_t queues; + uint16_t queue_pairs; virtio_net_disable_rss(n); if (cmd == VIRTIO_NET_CTRL_MQ_HASH_CONFIG) { - queues = virtio_net_handle_rss(n, iov, iov_cnt, false); - return queues ? VIRTIO_NET_OK : VIRTIO_NET_ERR; + queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, false); + return queue_pairs ? 
VIRTIO_NET_OK : VIRTIO_NET_ERR; } if (cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) { - queues = virtio_net_handle_rss(n, iov, iov_cnt, true); + queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, true); } else if (cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) { struct virtio_net_ctrl_mq mq; size_t s; @@ -1388,24 +1388,24 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, if (s != sizeof(mq)) { return VIRTIO_NET_ERR; } - queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs); + queue_pairs = virtio_lduw_p(vdev, &mq.virtqueue_pairs); } else { return VIRTIO_NET_ERR; } - if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || - queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || - queues > n->max_queues || + if (queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || + queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || + queue_pairs > n->max_queue_pairs || !n->multiqueue) { return VIRTIO_NET_ERR; } - n->curr_queues = queues; - /* stop the backend before changing the number of queues to avoid handling a + n->curr_queue_pairs = queue_pairs; + /* stop the backend before changing the number of queue_pairs to avoid handling a * disabled queue */ virtio_net_set_status(vdev, vdev->status); - virtio_net_set_queues(n); + virtio_net_set_queue_pairs(n); return VIRTIO_NET_OK; } @@ -1483,7 +1483,7 @@ static bool virtio_net_can_receive(NetClientState *nc) return false; } - if (nc->queue_index >= n->curr_queues) { + if (nc->queue_index >= n->curr_queue_pairs) { return false; } @@ -2763,11 +2763,11 @@ static void virtio_net_del_queue(VirtIONet *n, int index) virtio_del_queue(vdev, index * 2 + 1); } -static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues) +static void virtio_net_change_num_queue_pairs(VirtIONet *n, int new_max_queue_pairs) { VirtIODevice *vdev = VIRTIO_DEVICE(n); int old_num_queues = virtio_get_num_queues(vdev); - int new_num_queues = new_max_queues * 2 + 1; + int new_num_queues = new_max_queue_pairs * 2 + 1; int i; assert(old_num_queues >= 3); @@ -2800,12 +2800,12 @@ static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues) static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue) { - int max = multiqueue ? n->max_queues : 1; + int max = multiqueue ? 
n->max_queue_pairs : 1; n->multiqueue = multiqueue; - virtio_net_change_num_queues(n, max); + virtio_net_change_num_queue_pairs(n, max); - virtio_net_set_queues(n); + virtio_net_set_queue_pairs(n); } static int virtio_net_post_load_device(void *opaque, int version_id) @@ -2838,7 +2838,7 @@ static int virtio_net_post_load_device(void *opaque, int version_id) */ n->saved_guest_offloads = n->curr_guest_offloads; - virtio_net_set_queues(n); + virtio_net_set_queue_pairs(n); /* Find the first multicast entry in the saved MAC filter */ for (i = 0; i < n->mac_table.in_use; i++) { @@ -2851,7 +2851,7 @@ static int virtio_net_post_load_device(void *opaque, int version_id) /* nc.link_down can't be migrated, so infer link_down according * to link status bit in n->status */ link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0; - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { qemu_get_subqueue(n->nic, i)->link_down = link_down; } @@ -2916,9 +2916,9 @@ static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = { }, }; -static bool max_queues_gt_1(void *opaque, int version_id) +static bool max_queue_pairs_gt_1(void *opaque, int version_id) { - return VIRTIO_NET(opaque)->max_queues > 1; + return VIRTIO_NET(opaque)->max_queue_pairs > 1; } static bool has_ctrl_guest_offloads(void *opaque, int version_id) @@ -2943,13 +2943,13 @@ static bool mac_table_doesnt_fit(void *opaque, int version_id) struct VirtIONetMigTmp { VirtIONet *parent; VirtIONetQueue *vqs_1; - uint16_t curr_queues_1; + uint16_t curr_queue_pairs_1; uint8_t has_ufo; uint32_t has_vnet_hdr; }; /* The 2nd and subsequent tx_waiting flags are loaded later than - * the 1st entry in the queues and only if there's more than one + * the 1st entry in the queue_pairs and only if there's more than one * entry. We use the tmp mechanism to calculate a temporary * pointer and count and also validate the count. 
*/ @@ -2959,9 +2959,9 @@ static int virtio_net_tx_waiting_pre_save(void *opaque) struct VirtIONetMigTmp *tmp = opaque; tmp->vqs_1 = tmp->parent->vqs + 1; - tmp->curr_queues_1 = tmp->parent->curr_queues - 1; - if (tmp->parent->curr_queues == 0) { - tmp->curr_queues_1 = 0; + tmp->curr_queue_pairs_1 = tmp->parent->curr_queue_pairs - 1; + if (tmp->parent->curr_queue_pairs == 0) { + tmp->curr_queue_pairs_1 = 0; } return 0; @@ -2974,9 +2974,9 @@ static int virtio_net_tx_waiting_pre_load(void *opaque) /* Reuse the pointer setup from save */ virtio_net_tx_waiting_pre_save(opaque); - if (tmp->parent->curr_queues > tmp->parent->max_queues) { - error_report("virtio-net: curr_queues %x > max_queues %x", - tmp->parent->curr_queues, tmp->parent->max_queues); + if (tmp->parent->curr_queue_pairs > tmp->parent->max_queue_pairs) { + error_report("virtio-net: curr_queue_pairs %x > max_queue_pairs %x", + tmp->parent->curr_queue_pairs, tmp->parent->max_queue_pairs); return -EINVAL; } @@ -2990,7 +2990,7 @@ static const VMStateDescription vmstate_virtio_net_tx_waiting = { .pre_save = virtio_net_tx_waiting_pre_save, .fields = (VMStateField[]) { VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp, - curr_queues_1, + curr_queue_pairs_1, vmstate_virtio_net_queue_tx_waiting, struct VirtIONetQueue), VMSTATE_END_OF_LIST() @@ -3132,9 +3132,9 @@ static const VMStateDescription vmstate_virtio_net_device = { VMSTATE_UINT8(nobcast, VirtIONet), VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, vmstate_virtio_net_has_ufo), - VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0, + VMSTATE_SINGLE_TEST(max_queue_pairs, VirtIONet, max_queue_pairs_gt_1, 0, vmstate_info_uint16_equal, uint16_t), - VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1), + VMSTATE_UINT16_TEST(curr_queue_pairs, VirtIONet, max_queue_pairs_gt_1), VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, vmstate_virtio_net_tx_waiting), VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet, @@ -3411,16 +3411,16 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) return; } - n->max_queues = MAX(n->nic_conf.peers.queues, 1); - if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) { - error_setg(errp, "Invalid number of queues (= %" PRIu32 "), " + n->max_queue_pairs = MAX(n->nic_conf.peers.queues, 1); + if (n->max_queue_pairs * 2 + 1 > VIRTIO_QUEUE_MAX) { + error_setg(errp, "Invalid number of queue_pairs (= %" PRIu32 "), " "must be a positive integer less than %d.", - n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2); + n->max_queue_pairs, (VIRTIO_QUEUE_MAX - 1) / 2); virtio_cleanup(vdev); return; } - n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues); - n->curr_queues = 1; + n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queue_pairs); + n->curr_queue_pairs = 1; n->tx_timeout = n->net_conf.txtimer; if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer") @@ -3434,7 +3434,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n), n->net_conf.tx_queue_size); - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { virtio_net_add_queue(n, i); } @@ -3458,13 +3458,13 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) object_get_typename(OBJECT(dev)), dev->id, n); } - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { n->nic->ncs[i].do_not_pad = true; } peer_test_vnet_hdr(n); if (peer_has_vnet_hdr(n)) { - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < 
n->max_queue_pairs; i++) { qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true); } n->host_hdr_len = sizeof(struct virtio_net_hdr); @@ -3506,7 +3506,7 @@ static void virtio_net_device_unrealize(DeviceState *dev) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIONet *n = VIRTIO_NET(dev); - int i, max_queues; + int i, max_queue_pairs; if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) { virtio_net_unload_ebpf(n); @@ -3531,12 +3531,12 @@ static void virtio_net_device_unrealize(DeviceState *dev) assert(n->primary_opts == NULL); } - max_queues = n->multiqueue ? n->max_queues : 1; - for (i = 0; i < max_queues; i++) { + max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; + for (i = 0; i < max_queue_pairs; i++) { virtio_net_del_queue(n, i); } /* delete also control vq */ - virtio_del_queue(vdev, max_queues * 2); + virtio_del_queue(vdev, max_queue_pairs * 2); qemu_announce_timer_del(&n->announce_timer, false); g_free(n->vqs); qemu_del_nic(n->nic); -- cgit v1.2.3 From 22288fe5a3f2dc4cb5c8826646d466019ad67682 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 20 Oct 2021 12:55:59 +0800 Subject: virtio-net: vhost control virtqueue support This patch implements the control virtqueue support for vhost. This requires virtio-net to figure out the datapath queue pairs and control virtqueue via is_datapath and pass the number of those two types of virtqueues to vhost_net_start()/vhost_net_stop(). Signed-off-by: Jason Wang Message-Id: <20211020045600.16082-10-jasowang@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/net/virtio-net.c | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) (limited to 'hw/net/virtio-net.c') diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 7594f7ea92..f2014d5ea0 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -244,6 +244,7 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) VirtIODevice *vdev = VIRTIO_DEVICE(n); NetClientState *nc = qemu_get_queue(n->nic); int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; + int cvq = n->max_ncs - n->max_queue_pairs; if (!get_vhost_net(nc->peer)) { return; @@ -285,14 +286,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) } n->vhost_started = 1; - r = vhost_net_start(vdev, n->nic->ncs, queue_pairs, 0); + r = vhost_net_start(vdev, n->nic->ncs, queue_pairs, cvq); if (r < 0) { error_report("unable to start vhost net: %d: " "falling back on userspace virtio", -r); n->vhost_started = 0; } } else { - vhost_net_stop(vdev, n->nic->ncs, queue_pairs, 0); + vhost_net_stop(vdev, n->nic->ncs, queue_pairs, cvq); n->vhost_started = 0; } } @@ -3411,9 +3412,23 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) return; } - n->max_queue_pairs = MAX(n->nic_conf.peers.queues, 1); + n->max_ncs = MAX(n->nic_conf.peers.queues, 1); + + /* + * Figure out the datapath queue pairs since the backend could + * provide control queue via peers as well. + */ + if (n->nic_conf.peers.queues) { + for (i = 0; i < n->max_ncs; i++) { + if (n->nic_conf.peers.ncs[i]->is_datapath) { + ++n->max_queue_pairs; + } + } + } + n->max_queue_pairs = MAX(n->max_queue_pairs, 1); + if (n->max_queue_pairs * 2 + 1 > VIRTIO_QUEUE_MAX) { - error_setg(errp, "Invalid number of queue_pairs (= %" PRIu32 "), " + error_setg(errp, "Invalid number of queue pairs (= %" PRIu32 "), " "must be a positive integer less than %d.", n->max_queue_pairs, (VIRTIO_QUEUE_MAX - 1) / 2); virtio_cleanup(vdev); -- cgit v1.2.3
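Taken together, the control-virtqueue patches above change the vhost start/stop contract: virtio_net_vhost_status() now derives the number of datapath queue pairs from the peers that report is_datapath and passes the remaining NetClientStates as the control-virtqueue count to vhost_net_start()/vhost_net_stop(). The stand-alone C sketch below illustrates only that accounting, under the assumption of simplified stand-in names (fake_peer, count_datapath_pairs); these are illustrative, not the real QEMU types or APIs.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-in for NetClientState: only the flag we care about. */
struct fake_peer {
    bool is_datapath;
};

/* Count the peers that carry data traffic; mirrors the loop added to
 * virtio_net_device_realize() in the last patch. */
static int count_datapath_pairs(const struct fake_peer *peers, int max_ncs)
{
    int pairs = 0;

    for (int i = 0; i < max_ncs; i++) {
        if (peers[i].is_datapath) {
            pairs++;
        }
    }
    return pairs > 0 ? pairs : 1;   /* max_queue_pairs = MAX(..., 1) */
}

int main(void)
{
    /* Example: a backend exposing two data queue pairs plus one control
     * virtqueue (the non-datapath peer). */
    struct fake_peer peers[] = {
        { .is_datapath = true },
        { .is_datapath = true },
        { .is_datapath = false },
    };
    int max_ncs = (int)(sizeof(peers) / sizeof(peers[0]));
    int max_queue_pairs = count_datapath_pairs(peers, max_ncs);
    int cvq = max_ncs - max_queue_pairs;

    /* These are the two counts virtio_net_vhost_status() now hands to
     * vhost_net_start()/vhost_net_stop(). */
    printf("queue_pairs=%d cvq=%d\n", max_queue_pairs, cvq);
    return 0;
}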