author      Laurent Vivier <lvivier@redhat.com>       2022-10-20 11:58:46 +0200
committer   Jason Wang <jasowang@redhat.com>          2022-10-28 13:28:52 +0800
commit      7550a82259fcf9ce5f1f6443ced779d0eb8afdca (patch)
tree        42abd6f57db3904c9b2e77d84a69eff6a9a7794b /hw/net
parent      df8d07081718c29d04d106583d9c300128686cda (diff)
virtio-net: fix TX timer with tx_burst
When virtio_net_flush_tx() reaches the tx_burst value, the queue is
not fully flushed and nothing restarts the timer.

Fix that by doing for the TX timer what is already done for the
bottom-half TX path: rearm the timer if we find any packet to send
during the virtio_net_flush_tx() call, as sketched below.
Fixes: e3f30488e5f8 ("virtio-net: Limit number of packets sent per TX flush")
Cc: alex.williamson@redhat.com
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
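
The rearm logic added to the timer path can be condensed as the
following C sketch. It is distilled from the virtio_net_tx_timer()
hunks in the patch below; the wrapper name tx_timer_rearm_sketch is
hypothetical (the real code lives inline in virtio_net_tx_timer()),
and it assumes QEMU's internal virtio-net types and helpers.

/* Hypothetical helper condensing the timer-path rearm logic of this
 * patch; uses the same fields and helpers as hw/net/virtio-net.c. */
static void tx_timer_rearm_sketch(VirtIONetQueue *q, VirtIONet *n)
{
    int ret = virtio_net_flush_tx(q);

    if (ret == -EBUSY || ret == -EINVAL) {
        /* Flushing is disabled or the queue is broken: nothing to rearm. */
        return;
    }

    if (ret >= n->tx_burst) {
        /* A full burst was sent: more packets are likely pending, rearm now. */
        q->tx_waiting = 1;
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        return;
    }

    /* Partial burst: re-enable notification and flush anything that
     * arrived in the meantime; if something was sent, rearm again. */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        q->tx_waiting = 1;
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
    }
}

This mirrors the bottom-half path: the second flush with notifications
re-enabled catches packets queued between the burst-limited flush and
the rearm, so the guest's activity keeps the timer running.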
Diffstat (limited to 'hw/net')
-rw-r--r--   hw/net/virtio-net.c   50
1 file changed, 41 insertions(+), 9 deletions(-)
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 1fbf2f3e19..b6903aea54 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -2536,14 +2536,19 @@ static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
 
     virtio_queue_set_notification(q->tx_vq, 1);
     ret = virtio_net_flush_tx(q);
-    if (q->tx_bh && ret >= n->tx_burst) {
+    if (ret >= n->tx_burst) {
         /*
          * the flush has been stopped by tx_burst
          * we will not receive notification for the
          * remainining part, so re-schedule
          */
         virtio_queue_set_notification(q->tx_vq, 0);
-        qemu_bh_schedule(q->tx_bh);
+        if (q->tx_bh) {
+            qemu_bh_schedule(q->tx_bh);
+        } else {
+            timer_mod(q->tx_timer,
+                      qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
+        }
         q->tx_waiting = 1;
     }
 }
@@ -2644,6 +2649,8 @@ drop:
     return num_packets;
 }
 
+static void virtio_net_tx_timer(void *opaque);
+
 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIONet *n = VIRTIO_NET(vdev);
@@ -2661,15 +2668,13 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
     }
 
     if (q->tx_waiting) {
-        virtio_queue_set_notification(vq, 1);
+        /* We already have queued packets, immediately flush */
         timer_del(q->tx_timer);
-        q->tx_waiting = 0;
-        if (virtio_net_flush_tx(q) == -EINVAL) {
-            return;
-        }
+        virtio_net_tx_timer(q);
     } else {
+        /* re-arm timer to flush it (and more) on next tick */
         timer_mod(q->tx_timer,
-                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
+                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
         q->tx_waiting = 1;
         virtio_queue_set_notification(vq, 0);
     }
@@ -2702,6 +2707,8 @@ static void virtio_net_tx_timer(void *opaque)
     VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
     VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    int ret;
+
     /* This happens when device was stopped but BH wasn't. */
     if (!vdev->vm_running) {
         /* Make sure tx waiting is set, so we'll run when restarted. */
@@ -2716,8 +2723,33 @@ static void virtio_net_tx_timer(void *opaque)
         return;
     }
 
+    ret = virtio_net_flush_tx(q);
+    if (ret == -EBUSY || ret == -EINVAL) {
+        return;
+    }
+    /*
+     * If we flush a full burst of packets, assume there are
+     * more coming and immediately rearm
+     */
+    if (ret >= n->tx_burst) {
+        q->tx_waiting = 1;
+        timer_mod(q->tx_timer,
+                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
+        return;
+    }
+    /*
+     * If less than a full burst, re-enable notification and flush
+     * anything that may have come in while we weren't looking. If
+     * we find something, assume the guest is still active and rearm
+     */
     virtio_queue_set_notification(q->tx_vq, 1);
-    virtio_net_flush_tx(q);
+    ret = virtio_net_flush_tx(q);
+    if (ret > 0) {
+        virtio_queue_set_notification(q->tx_vq, 0);
+        q->tx_waiting = 1;
+        timer_mod(q->tx_timer,
+                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
+    }
 }
 
 static void virtio_net_tx_bh(void *opaque)