From 0e6bff0d43bf04c6e7a16c2775879816ca056b3d Mon Sep 17 00:00:00 2001
From: Hawkins Jiawei
Date: Fri, 13 Oct 2023 16:09:36 +0800
Subject: vdpa: Use iovec for vhost_vdpa_net_cvq_add()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Next patches in this series will no longer perform an immediate poll
and check of the device's used buffers for each CVQ state load command.
Consequently, there will be multiple pending buffers in the shadow
VirtQueue, making it necessary for every control command to have its
own buffer.

To achieve this, this patch refactors vhost_vdpa_net_cvq_add() to
accept `struct iovec`, which eliminates the coupling of control
commands to `s->cvq_cmd_out_buffer` and `s->status`, allowing them to
use their own buffers.

Signed-off-by: Hawkins Jiawei
Acked-by: Eugenio Pérez
Message-Id: <8a328f146fb043f34edb75ba6d043d2d6de88f99.1697165821.git.yin31149@gmail.com>
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
---
 net/vhost-vdpa.c | 39 ++++++++++++++++++++++-----------------
 1 file changed, 22 insertions(+), 17 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 939c984d5b..618758596a 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -618,22 +618,14 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
     vhost_vdpa_net_client_stop(nc);
 }
 
-static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
-                                      size_t in_len)
+static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
+                                      const struct iovec *out_sg, size_t out_num,
+                                      const struct iovec *in_sg, size_t in_num)
 {
-    /* Buffers for the device */
-    const struct iovec out = {
-        .iov_base = s->cvq_cmd_out_buffer,
-        .iov_len = out_len,
-    };
-    const struct iovec in = {
-        .iov_base = s->status,
-        .iov_len = sizeof(virtio_net_ctrl_ack),
-    };
     VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
     int r;
 
-    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
+    r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
     if (unlikely(r != 0)) {
         if (unlikely(r == -ENOSPC)) {
             qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
@@ -659,6 +651,15 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
         .cmd = cmd,
     };
     size_t data_size = iov_size(data_sg, data_num);
+    /* Buffers for the device */
+    const struct iovec out = {
+        .iov_base = s->cvq_cmd_out_buffer,
+        .iov_len = sizeof(ctrl) + data_size,
+    };
+    const struct iovec in = {
+        .iov_base = s->status,
+        .iov_len = sizeof(*s->status),
+    };
 
     assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
 
@@ -669,8 +670,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
     iov_to_buf(data_sg, data_num, 0,
                s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
 
-    return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
-                                  sizeof(virtio_net_ctrl_ack));
+    return vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
 }
 
 static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
@@ -1248,10 +1248,15 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
         .iov_base = s->cvq_cmd_out_buffer,
     };
     /* in buffer used for device model */
-    const struct iovec in = {
+    const struct iovec model_in = {
         .iov_base = &status,
         .iov_len = sizeof(status),
     };
+    /* in buffer used for vdpa device */
+    const struct iovec vdpa_in = {
+        .iov_base = s->status,
+        .iov_len = sizeof(*s->status),
+    };
     ssize_t dev_written = -EINVAL;
 
     out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
@@ -1285,7 +1290,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
             goto out;
         }
     } else {
-        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
+        dev_written = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
         if (unlikely(dev_written < 0)) {
             goto out;
         }
@@ -1301,7 +1306,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
     }
 
     status = VIRTIO_NET_ERR;
-    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
+    virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
     if (status != VIRTIO_NET_OK) {
         error_report("Bad CVQ processing in model");
     }
--

From 327dedb8df91f57ef917ab5b5db519146ee6f08b Mon Sep 17 00:00:00 2001
From: Hawkins Jiawei
Date: Fri, 13 Oct 2023 16:09:37 +0800
Subject: vdpa: Avoid using vhost_vdpa_net_load_*() outside vhost_vdpa_net_load()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Next patches in this series will refactor vhost_vdpa_net_load_cmd() to
iterate through the control command shadow buffers, allowing QEMU to
send CVQ state load commands in parallel at device startup.

Considering that QEMU always forwards CVQ commands serially outside of
vhost_vdpa_net_load(), it is more elegant to send those CVQ commands
directly without invoking the vhost_vdpa_net_load_*() helpers.

Signed-off-by: Hawkins Jiawei
Acked-by: Eugenio Pérez
Message-Id: <254f0618efde7af7229ba4fdada667bb9d318991.1697165821.git.yin31149@gmail.com>
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
---
 net/vhost-vdpa.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 618758596a..86b8d31244 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -1114,12 +1114,14 @@ static NetClientInfo net_vhost_vdpa_cvq_info = {
  */
 static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                        VirtQueueElement *elem,
-                                                       struct iovec *out)
+                                                       struct iovec *out,
+                                                       const struct iovec *in)
 {
     struct virtio_net_ctrl_mac mac_data, *mac_ptr;
     struct virtio_net_ctrl_hdr *hdr_ptr;
     uint32_t cursor;
     ssize_t r;
+    uint8_t on = 1;
 
     /* parse the non-multicast MAC address entries from CVQ command */
     cursor = sizeof(*hdr_ptr);
@@ -1167,7 +1169,13 @@ static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
      * filter table to the vdpa device, it should send the
      * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
      */
-    r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
+    hdr_ptr = out->iov_base;
+    out->iov_len = sizeof(*hdr_ptr) + sizeof(on);
+
+    hdr_ptr->class = VIRTIO_NET_CTRL_RX;
+    hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
+    iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
+    r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
     if (unlikely(r < 0)) {
         return r;
     }
@@ -1285,7 +1293,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
          * the CVQ command directly.
          */
         dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                                   &out, &vdpa_in);
         if (unlikely(dev_written < 0)) {
             goto out;
         }
--

From 24e59cfe0cb53416b06c2c117bc22ff22dc54df3 Mon Sep 17 00:00:00 2001
From: Hawkins Jiawei
Date: Fri, 13 Oct 2023 16:09:38 +0800
Subject: vdpa: Check device ack in vhost_vdpa_net_load_rx_mode()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Considering that vhost_vdpa_net_load_rx_mode() is only called within
vhost_vdpa_net_load_rx() now, this patch refactors
vhost_vdpa_net_load_rx_mode() to include a check for the device's ack,
simplifying the code and improving its maintainability.

Signed-off-by: Hawkins Jiawei
Acked-by: Eugenio Pérez
Message-Id: <68811d52f96ae12d68f0d67d996ac1642a623943.1697165821.git.yin31149@gmail.com>
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
---
 net/vhost-vdpa.c | 76 +++++++++++++++++++++++---------------------------------
 1 file changed, 31 insertions(+), 45 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 86b8d31244..36a4e57c0d 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -827,14 +827,24 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
         .iov_base = &on,
         .iov_len = sizeof(on),
     };
-    return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
-                                   cmd, &data, 1);
+    ssize_t dev_written;
+
+    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
+                                          cmd, &data, 1);
+    if (unlikely(dev_written < 0)) {
+        return dev_written;
+    }
+    if (*s->status != VIRTIO_NET_OK) {
+        return -EIO;
+    }
+
+    return 0;
 }
 
 static int vhost_vdpa_net_load_rx(VhostVDPAState *s, const VirtIONet *n)
 {
-    ssize_t dev_written;
+    ssize_t r;
 
     if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
         return 0;
     }
@@ -859,13 +869,9 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
      * configuration only at live migration.
      */
    if (!n->mac_table.uni_overflow && !n->promisc) {
-        dev_written = vhost_vdpa_net_load_rx_mode(s,
-                                                  VIRTIO_NET_CTRL_RX_PROMISC, 0);
-        if (unlikely(dev_written < 0)) {
-            return dev_written;
-        }
-        if (*s->status != VIRTIO_NET_OK) {
-            return -EIO;
+        r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 0);
+        if (unlikely(r < 0)) {
+            return r;
         }
     }
 
@@ -887,13 +893,9 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
      * configuration only at live migration.
      */
     if (n->mac_table.multi_overflow || n->allmulti) {
-        dev_written = vhost_vdpa_net_load_rx_mode(s,
-                                                  VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
-        if (unlikely(dev_written < 0)) {
-            return dev_written;
-        }
-        if (*s->status != VIRTIO_NET_OK) {
-            return -EIO;
+        r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
+        if (unlikely(r < 0)) {
+            return r;
         }
     }
 
@@ -912,13 +914,9 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
      * configuration only at live migration.
      */
     if (n->alluni) {
-        dev_written = vhost_vdpa_net_load_rx_mode(s,
-                                                  VIRTIO_NET_CTRL_RX_ALLUNI, 1);
-        if (dev_written < 0) {
-            return dev_written;
-        }
-        if (*s->status != VIRTIO_NET_OK) {
-            return -EIO;
+        r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_ALLUNI, 1);
+        if (r < 0) {
+            return r;
         }
     }
 
@@ -933,13 +931,9 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
      * configuration only at live migration.
      */
     if (n->nomulti) {
-        dev_written = vhost_vdpa_net_load_rx_mode(s,
-                                                  VIRTIO_NET_CTRL_RX_NOMULTI, 1);
-        if (dev_written < 0) {
-            return dev_written;
-        }
-        if (*s->status != VIRTIO_NET_OK) {
-            return -EIO;
+        r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOMULTI, 1);
+        if (r < 0) {
+            return r;
         }
     }
 
@@ -954,13 +948,9 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
      * configuration only at live migration.
      */
     if (n->nouni) {
-        dev_written = vhost_vdpa_net_load_rx_mode(s,
-                                                  VIRTIO_NET_CTRL_RX_NOUNI, 1);
-        if (dev_written < 0) {
-            return dev_written;
-        }
-        if (*s->status != VIRTIO_NET_OK) {
-            return -EIO;
+        r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOUNI, 1);
+        if (r < 0) {
+            return r;
         }
     }
 
@@ -975,13 +965,9 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
      * configuration only at live migration.
      */
     if (n->nobcast) {
-        dev_written = vhost_vdpa_net_load_rx_mode(s,
-                                                  VIRTIO_NET_CTRL_RX_NOBCAST, 1);
-        if (dev_written < 0) {
-            return dev_written;
-        }
-        if (*s->status != VIRTIO_NET_OK) {
-            return -EIO;
+        r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOBCAST, 1);
+        if (r < 0) {
+            return r;
         }
     }
 
--

From a864a3219d7e63569583d204c12bff2a0f90463e Mon Sep 17 00:00:00 2001
From: Hawkins Jiawei
Date: Fri, 13 Oct 2023 16:09:39 +0800
Subject: vdpa: Move vhost_svq_poll() to the caller of vhost_vdpa_net_cvq_add()

This patch moves vhost_svq_poll() to the caller of
vhost_vdpa_net_cvq_add() and introduces a helper function.

By making this change, the next patches in this series are able to
refactor vhost_vdpa_net_load_*() to delay the polling and checking
process until either the SVQ is full or the control command shadow
buffers are full.

Signed-off-by: Hawkins Jiawei
Message-Id: <196cadb55175a75275660c6634a538289f027ae3.1697165821.git.yin31149@gmail.com>
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
---
 net/vhost-vdpa.c | 53 +++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 43 insertions(+), 10 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 36a4e57c0d..ea73e3c410 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -631,15 +631,21 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
             qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                           __func__);
         }
-        return r;
     }
 
-    /*
-     * We can poll here since we've had BQL from the time we sent the
-     * descriptor. Also, we need to take the answer before SVQ pulls by itself,
-     * when BQL is released
-     */
-    return vhost_svq_poll(svq, 1);
+    return r;
+}
+
+/*
+ * Convenience wrapper to poll SVQ for multiple control commands.
+ *
+ * Caller should hold the BQL when invoking this function, and should take
+ * the answer before SVQ pulls by itself when BQL is released.
+ */
+static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
+{
+    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
+    return vhost_svq_poll(svq, cmds_in_flight);
 }
 
@@ -660,6 +666,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
         .iov_base = s->status,
         .iov_len = sizeof(*s->status),
     };
+    ssize_t r;
 
     assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
 
@@ -670,7 +677,16 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
     iov_to_buf(data_sg, data_num, 0,
                s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
 
-    return vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
+    r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
+    if (unlikely(r < 0)) {
+        return r;
+    }
+
+    /*
+     * We can poll here since we've had BQL from the time
+     * we sent the descriptor.
+     */
+    return vhost_vdpa_net_svq_poll(s, 1);
 }
 
 static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
@@ -1165,6 +1181,15 @@ static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
     if (unlikely(r < 0)) {
         return r;
     }
+
+    /*
+     * We can poll here since we've had BQL from the time
+     * we sent the descriptor.
+     */
+    r = vhost_vdpa_net_svq_poll(s, 1);
+    if (unlikely(r < sizeof(*s->status))) {
+        return r;
+    }
     if (*s->status != VIRTIO_NET_OK) {
         return sizeof(*s->status);
     }
@@ -1284,10 +1309,18 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
             goto out;
         }
     } else {
-        dev_written = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
-        if (unlikely(dev_written < 0)) {
+        ssize_t r;
+        r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
+        if (unlikely(r < 0)) {
+            dev_written = r;
             goto out;
         }
+
+        /*
+         * We can poll here since we've had BQL from the time
+         * we sent the descriptor.
+         */
+        dev_written = vhost_vdpa_net_svq_poll(s, 1);
     }
 
     if (unlikely(dev_written < sizeof(status))) {
--

From 1d7e2a8fd4996fdb20d74fce41fe897311f3b06a Mon Sep 17 00:00:00 2001
From: Hawkins Jiawei
Date: Fri, 13 Oct 2023 16:09:40 +0800
Subject: vdpa: Introduce cursors to vhost_vdpa_net_loadx()

This patch introduces two new arguments, `out_cursor` and `in_cursor`,
to vhost_vdpa_net_loadx(). Additionally, it includes a helper function
vhost_vdpa_net_load_cursor_reset() for resetting these cursors.

Furthermore, this patch refactors vhost_vdpa_net_load_cmd() so that it
prepares buffers for the device using the cursor arguments, instead of
directly accessing the `s->cvq_cmd_out_buffer` and `s->status` fields.

By making these changes, the next patches in this series can refactor
vhost_vdpa_net_load_cmd() directly to iterate through the control
command shadow buffers, allowing QEMU to send CVQ state load commands
in parallel at device startup.

Signed-off-by: Hawkins Jiawei
Message-Id: <1c6516e233a14cc222f0884e148e4e1adceda78d.1697165821.git.yin31149@gmail.com>
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
---
 net/vhost-vdpa.c | 111 +++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 75 insertions(+), 36 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index ea73e3c410..ef4d242811 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -648,7 +648,22 @@ static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
     return vhost_svq_poll(svq, cmds_in_flight);
 }
 
-static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
+static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
+                                             struct iovec *out_cursor,
+                                             struct iovec *in_cursor)
+{
+    /* reset the cursor of the output buffer for the device */
+    out_cursor->iov_base = s->cvq_cmd_out_buffer;
+    out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
+
+    /* reset the cursor of the in buffer for the device */
+    in_cursor->iov_base = s->status;
+    in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
+}
+
+static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
+                                       struct iovec *out_cursor,
+                                       struct iovec *in_cursor, uint8_t class,
                                        uint8_t cmd, const struct iovec *data_sg,
                                        size_t data_num)
 {
@@ -657,25 +672,21 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
         .cmd = cmd,
     };
     size_t data_size = iov_size(data_sg, data_num);
-    /* Buffers for the device */
-    const struct iovec out = {
-        .iov_base = s->cvq_cmd_out_buffer,
-        .iov_len = sizeof(ctrl) + data_size,
-    };
-    const struct iovec in = {
-        .iov_base = s->status,
-        .iov_len = sizeof(*s->status),
-    };
+    struct iovec out, in;
     ssize_t r;
 
     assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
 
     /* pack the CVQ command header */
-    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
-
+    iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
     /* pack the CVQ command command-specific-data */
     iov_to_buf(data_sg, data_num, 0,
-               s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
+               out_cursor->iov_base + sizeof(ctrl), data_size);
+
+    /* extract the required buffer from the cursor for output */
+    iov_copy(&out, 1, out_cursor, 1, 0, sizeof(ctrl) + data_size);
+    /* extract the required buffer from the cursor for input */
+    iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));
 
     r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
     if (unlikely(r < 0)) {
@@ -689,14 +700,17 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
     return vhost_vdpa_net_svq_poll(s, 1);
 }
 
-static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
+static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
+                                   struct iovec *out_cursor,
+                                   struct iovec *in_cursor)
 {
     if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
         const struct iovec data = {
             .iov_base = (void *)n->mac,
             .iov_len = sizeof(n->mac),
         };
-        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
+        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                                      VIRTIO_NET_CTRL_MAC,
                                                       VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                       &data, 1);
         if (unlikely(dev_written < 0)) {
@@ -748,7 +762,7 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
             .iov_len = mul_macs_size,
         },
     };
-    ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
+    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                                   VIRTIO_NET_CTRL_MAC,
                                                   VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                                   data, ARRAY_SIZE(data));
@@ -763,7 +777,9 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
 }
 
 static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
-                                  const VirtIONet *n)
+                                  const VirtIONet *n,
+                                  struct iovec *out_cursor,
+                                  struct iovec *in_cursor)
 {
     struct virtio_net_ctrl_mq mq;
     ssize_t dev_written;
@@ -777,7 +793,8 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
         .iov_base = &mq,
         .iov_len = sizeof(mq),
     };
-    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
+    dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                          VIRTIO_NET_CTRL_MQ,
                                           VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                           &data, 1);
     if (unlikely(dev_written < 0)) {
@@ -791,7 +808,9 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
 }
 
 static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
-                                        const VirtIONet *n)
+                                        const VirtIONet *n,
+                                        struct iovec *out_cursor,
+                                        struct iovec *in_cursor)
 {
     uint64_t offloads;
     ssize_t dev_written;
@@ -822,7 +841,8 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
         .iov_base = &offloads,
         .iov_len = sizeof(offloads),
     };
-    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
+    dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                           VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                           &data, 1);
     if (unlikely(dev_written < 0)) {
@@ -836,6 +856,8 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
 }
 
 static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
+                                       struct iovec *out_cursor,
+                                       struct iovec *in_cursor,
                                        uint8_t cmd,
                                        uint8_t on)
 {
@@ -845,7 +867,8 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
     };
     ssize_t dev_written;
 
-    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
+    dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                          VIRTIO_NET_CTRL_RX,
                                           cmd, &data, 1);
     if (unlikely(dev_written < 0)) {
         return dev_written;
@@ -858,7 +881,9 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
 }
 
 static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
-                                  const VirtIONet *n)
+                                  const VirtIONet *n,
+                                  struct iovec *out_cursor,
+                                  struct iovec *in_cursor)
 {
     ssize_t r;
 
@@ -885,7 +910,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
      * configuration only at live migration.
      */
     if (!n->mac_table.uni_overflow && !n->promisc) {
-        r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 0);
+        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+                                        VIRTIO_NET_CTRL_RX_PROMISC, 0);
         if (unlikely(r < 0)) {
             return r;
         }
@@ -909,7 +935,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
      * configuration only at live migration.
      */
     if (n->mac_table.multi_overflow || n->allmulti) {
-        r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
+        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+                                        VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
         if (unlikely(r < 0)) {
             return r;
         }
@@ -930,7 +957,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
      * configuration only at live migration.
      */
     if (n->alluni) {
-        r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_ALLUNI, 1);
+        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+                                        VIRTIO_NET_CTRL_RX_ALLUNI, 1);
         if (r < 0) {
             return r;
         }
@@ -947,7 +975,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
      * configuration only at live migration.
     */
     if (n->nomulti) {
-        r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOMULTI, 1);
+        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+                                        VIRTIO_NET_CTRL_RX_NOMULTI, 1);
         if (r < 0) {
             return r;
         }
@@ -964,7 +993,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
      * configuration only at live migration.
      */
     if (n->nouni) {
-        r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOUNI, 1);
+        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+                                        VIRTIO_NET_CTRL_RX_NOUNI, 1);
         if (r < 0) {
             return r;
         }
@@ -981,7 +1011,8 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
      * configuration only at live migration.
      */
     if (n->nobcast) {
-        r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOBCAST, 1);
+        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+                                        VIRTIO_NET_CTRL_RX_NOBCAST, 1);
         if (r < 0) {
             return r;
         }
@@ -992,13 +1023,16 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
 
 static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                            const VirtIONet *n,
+                                           struct iovec *out_cursor,
+                                           struct iovec *in_cursor,
                                            uint16_t vid)
 {
     const struct iovec data = {
         .iov_base = &vid,
         .iov_len = sizeof(vid),
     };
-    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN,
+    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                                  VIRTIO_NET_CTRL_VLAN,
                                                   VIRTIO_NET_CTRL_VLAN_ADD,
                                                   &data, 1);
     if (unlikely(dev_written < 0)) {
@@ -1012,7 +1046,9 @@ static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
 }
 
 static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
-                                    const VirtIONet *n)
+                                    const VirtIONet *n,
+                                    struct iovec *out_cursor,
+                                    struct iovec *in_cursor)
 {
     int r;
 
@@ -1023,7 +1059,8 @@ static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
     for (int i = 0; i < MAX_VLAN >> 5; i++) {
         for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
             if (n->vlans[i] & (1U << j)) {
-                r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);
+                r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
+                                                    in_cursor, (i << 5) + j);
                 if (unlikely(r != 0)) {
                     return r;
                 }
@@ -1040,6 +1077,7 @@ static int vhost_vdpa_net_cvq_load(NetClientState *nc)
     struct vhost_vdpa *v = &s->vhost_vdpa;
     const VirtIONet *n;
     int r;
+    struct iovec out_cursor, in_cursor;
 
     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 
@@ -1047,23 +1085,24 @@ static int vhost_vdpa_net_cvq_load(NetClientState *nc)
 
     if (v->shadow_vqs_enabled) {
         n = VIRTIO_NET(v->dev->vdev);
-        r = vhost_vdpa_net_load_mac(s, n);
+        vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
+        r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
         if (unlikely(r < 0)) {
             return r;
         }
-        r = vhost_vdpa_net_load_mq(s, n);
+        r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
             return r;
         }
-        r = vhost_vdpa_net_load_offloads(s, n);
+        r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
         if (unlikely(r)) {
             return r;
         }
-        r = vhost_vdpa_net_load_rx(s, n);
+        r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
         if (unlikely(r)) {
             return r;
         }
-        r = vhost_vdpa_net_load_vlan(s, n);
+        r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
         if (unlikely(r)) {
             return r;
         }
--

From acec5f685c7ad6bd3c9bb9a57d4e509160480376 Mon Sep 17 00:00:00 2001
From: Hawkins Jiawei
Date: Fri, 13 Oct 2023 16:09:42 +0800
Subject: vdpa: Send cvq state load commands in parallel
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch enables sending CVQ state load commands in parallel at
device startup through the following steps:

* Refactor vhost_vdpa_net_load_cmd() to iterate through the control
  command shadow buffers. This allows different CVQ state load commands
  to use their own unique buffers.

* Delay the polling and checking of buffers until either the SVQ is
  full or the control command shadow buffers are full.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1578

Signed-off-by: Hawkins Jiawei
Acked-by: Eugenio Pérez
Message-Id: <9350f32278e39f7bce297b8f2d82dac27c6f8c9a.1697165821.git.yin31149@gmail.com>
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
---
 net/vhost-vdpa.c | 165 ++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 102 insertions(+), 63 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index ef4d242811..4b7c3b81b8 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -661,6 +661,31 @@ static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
     in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
 }
 
+/*
+ * Poll SVQ for multiple pending control commands and check the device's ack.
+ *
+ * Caller should hold the BQL when invoking this function.
+ *
+ * @s: The VhostVDPAState
+ * @len: The length of the pending status shadow buffer
+ */
+static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
+{
+    /* device uses a one-byte length ack for each control command */
+    ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
+    if (unlikely(dev_written != len)) {
+        return -EIO;
+    }
+
+    /* check the device's ack */
+    for (int i = 0; i < len; ++i) {
+        if (s->status[i] != VIRTIO_NET_OK) {
+            return -EIO;
+        }
+    }
+    return 0;
+}
+
 static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor, uint8_t class,
@@ -671,11 +696,31 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
         .class = class,
         .cmd = cmd,
     };
-    size_t data_size = iov_size(data_sg, data_num);
+    size_t data_size = iov_size(data_sg, data_num), cmd_size;
     struct iovec out, in;
     ssize_t r;
+    unsigned dummy_cursor_iov_cnt;
+    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
 
     assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
+    cmd_size = sizeof(ctrl) + data_size;
+    if (vhost_svq_available_slots(svq) < 2 ||
+        iov_size(out_cursor, 1) < cmd_size) {
+        /*
+         * It is time to flush all pending control commands if SVQ is full
+         * or control commands shadow buffers are full.
+         *
+         * We can poll here since we've had BQL from the time
+         * we sent the descriptor.
+         */
+        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
+                                     (void *)s->status);
+        if (unlikely(r < 0)) {
+            return r;
+        }
+
+        vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
+    }
 
     /* pack the CVQ command header */
     iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
@@ -684,7 +729,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                out_cursor->iov_base + sizeof(ctrl), data_size);
 
     /* extract the required buffer from the cursor for output */
-    iov_copy(&out, 1, out_cursor, 1, 0, sizeof(ctrl) + data_size);
+    iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
     /* extract the required buffer from the cursor for input */
     iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));
 
@@ -693,11 +738,13 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
         return r;
     }
 
-    /*
-     * We can poll here since we've had BQL from the time
-     * we sent the descriptor.
-     */
-    return vhost_vdpa_net_svq_poll(s, 1);
+    /* iterate the cursors */
+    dummy_cursor_iov_cnt = 1;
+    iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
+    dummy_cursor_iov_cnt = 1;
+    iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));
+
+    return 0;
 }
 
 static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
@@ -709,15 +756,12 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
             .iov_base = (void *)n->mac,
             .iov_len = sizeof(n->mac),
         };
-        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
-                                                      VIRTIO_NET_CTRL_MAC,
-                                                      VIRTIO_NET_CTRL_MAC_ADDR_SET,
-                                                      &data, 1);
-        if (unlikely(dev_written < 0)) {
-            return dev_written;
-        }
-        if (*s->status != VIRTIO_NET_OK) {
-            return -EIO;
+        ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                            VIRTIO_NET_CTRL_MAC,
+                                            VIRTIO_NET_CTRL_MAC_ADDR_SET,
+                                            &data, 1);
+        if (unlikely(r < 0)) {
+            return r;
         }
     }
 
@@ -762,15 +806,12 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
             .iov_len = mul_macs_size,
         },
     };
-    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
-                                                  VIRTIO_NET_CTRL_MAC,
-                                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
-                                                  data, ARRAY_SIZE(data));
-    if (unlikely(dev_written < 0)) {
-        return dev_written;
-    }
-    if (*s->status != VIRTIO_NET_OK) {
-        return -EIO;
+    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                        VIRTIO_NET_CTRL_MAC,
+                                        VIRTIO_NET_CTRL_MAC_TABLE_SET,
+                                        data, ARRAY_SIZE(data));
+    if (unlikely(r < 0)) {
+        return r;
     }
 
     return 0;
@@ -782,7 +823,7 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                   struct iovec *in_cursor)
 {
     struct virtio_net_ctrl_mq mq;
-    ssize_t dev_written;
+    ssize_t r;
 
     if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
         return 0;
@@ -793,15 +834,12 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
         .iov_base = &mq,
         .iov_len = sizeof(mq),
     };
-    dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
-                                          VIRTIO_NET_CTRL_MQ,
-                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
-                                          &data, 1);
-    if (unlikely(dev_written < 0)) {
-        return dev_written;
-    }
-    if (*s->status != VIRTIO_NET_OK) {
-        return -EIO;
+    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                VIRTIO_NET_CTRL_MQ,
+                                VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
+                                &data, 1);
+    if (unlikely(r < 0)) {
+        return r;
     }
 
     return 0;
@@ -813,7 +851,7 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                         struct iovec *in_cursor)
 {
     uint64_t offloads;
-    ssize_t dev_written;
+    ssize_t r;
 
     if (!virtio_vdev_has_feature(&n->parent_obj,
                                  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
@@ -841,15 +879,12 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
         .iov_base = &offloads,
         .iov_len = sizeof(offloads),
     };
-    dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
-                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS,
-                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
-                                          &data, 1);
-    if (unlikely(dev_written < 0)) {
-        return dev_written;
-    }
-    if (*s->status != VIRTIO_NET_OK) {
-        return -EIO;
+    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                VIRTIO_NET_CTRL_GUEST_OFFLOADS,
+                                VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
+                                &data, 1);
+    if (unlikely(r < 0)) {
+        return r;
     }
 
     return 0;
@@ -865,16 +900,12 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
         .iov_base = &on,
         .iov_len = sizeof(on),
     };
-    ssize_t dev_written;
+    ssize_t r;
 
-    dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
-                                          VIRTIO_NET_CTRL_RX,
-                                          cmd, &data, 1);
-    if (unlikely(dev_written < 0)) {
-        return dev_written;
-    }
-    if (*s->status != VIRTIO_NET_OK) {
-        return -EIO;
+    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                VIRTIO_NET_CTRL_RX, cmd, &data, 1);
+    if (unlikely(r < 0)) {
+        return r;
     }
 
     return 0;
@@ -1031,15 +1062,12 @@ static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
         .iov_base = &vid,
         .iov_len = sizeof(vid),
     };
-    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
-                                                  VIRTIO_NET_CTRL_VLAN,
-                                                  VIRTIO_NET_CTRL_VLAN_ADD,
-                                                  &data, 1);
-    if (unlikely(dev_written < 0)) {
-        return dev_written;
-    }
-    if (unlikely(*s->status != VIRTIO_NET_OK)) {
-        return -EIO;
+    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                        VIRTIO_NET_CTRL_VLAN,
+                                        VIRTIO_NET_CTRL_VLAN_ADD,
+                                        &data, 1);
+    if (unlikely(r < 0)) {
+        return r;
     }
 
     return 0;
@@ -1106,6 +1134,17 @@ static int vhost_vdpa_net_cvq_load(NetClientState *nc)
         if (unlikely(r)) {
             return r;
         }
+
+        /*
+         * We need to poll and check all pending device's used buffers.
+         *
+         * We can poll here since we've had BQL from the time
+         * we sent the descriptor.
+         */
+        r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
+        if (unlikely(r)) {
+            return r;
+        }
     }
 
     for (int i = 0; i < v->dev->vq_index; ++i) {
--
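
Taken together, the series converts the CVQ state-load path from one
send/poll/check round trip per command into a batched flow: commands are
packed back to back into the shadow buffers through cursors, and the used
ring is only polled when the queue or the buffers run out of room, plus
once more at the end of loading. The standalone C sketch below illustrates
that pattern under stated assumptions; `cvq_batch`, `add_cmd()`, `flush()`,
and the buffer sizes are invented names and stand-ins for illustration, not
the QEMU API (the real logic lives in vhost_vdpa_net_load_cmd() and
vhost_vdpa_net_svq_flush() in net/vhost-vdpa.c).

/*
 * Minimal sketch (not QEMU code) of cursor-based command batching with a
 * deferred flush. SHADOW_LEN stands in for vhost_vdpa_net_cvq_cmd_page_len()
 * and QUEUE_SLOTS for vhost_svq_available_slots().
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

#define SHADOW_LEN  64  /* shared shadow buffer size (stand-in) */
#define QUEUE_SLOTS 4   /* queue capacity in pending commands (stand-in) */

typedef struct {
    uint8_t buf[SHADOW_LEN]; /* shadow buffer shared by all pending commands */
    size_t used;             /* cursor: bytes already claimed in buf */
    size_t pending;          /* commands queued but not yet flushed */
} cvq_batch;

/* Poll and check every pending command, then rewind the cursor. */
static int flush(cvq_batch *b)
{
    printf("flush: polling %zu pending command(s)\n", b->pending);
    b->used = 0;
    b->pending = 0;
    return 0;
}

/* Queue one command, flushing first if buffer or queue space runs out. */
static int add_cmd(cvq_batch *b, const void *cmd, size_t len)
{
    if (b->pending == QUEUE_SLOTS || b->used + len > SHADOW_LEN) {
        int r = flush(b); /* the deferred poll-and-check happens here */
        if (r < 0) {
            return r;
        }
    }
    memcpy(b->buf + b->used, cmd, len); /* each command gets its own slot */
    b->used += len;
    b->pending++;
    return 0;
}

int main(void)
{
    cvq_batch b = {0};
    uint8_t cmd[24] = {0}; /* one dummy command; two fit per shadow buffer */

    for (int i = 0; i < 6; i++) {
        add_cmd(&b, cmd, sizeof(cmd));
    }
    flush(&b); /* final flush, mirroring the end of vhost_vdpa_net_cvq_load() */
    return 0;
}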