commit a937f8e8577babc32b24d4f518cb336c013cd14f
tree   d755bbc711e4c71842124a391d4e4ede9b88306f
parent 816a430c517eae48da5a31207ca43151df3203b0
author Stefan Hajnoczi <stefanha@redhat.com> 2022-11-02 14:23:37 -0400
committer Stefan Hajnoczi <stefanha@redhat.com> 2023-01-23 15:01:23 -0500

virtio-blk: simplify virtio_blk_dma_restart_cb()
virtio_blk_dma_restart_cb() is tricky because the BH must deal with
virtio_blk_data_plane_start()/virtio_blk_data_plane_stop() being called.
There are two issues with the code:
1. virtio_blk_device_realize() should use qdev_add_vm_change_state_handler()
   instead of qemu_add_vm_change_state_handler(). This makes the ordering
   with virtio_init()'s vm change state handler, which calls
   virtio_blk_data_plane_start()/virtio_blk_data_plane_stop(),
   well-defined. blk's AioContext is then guaranteed to be up-to-date in
   virtio_blk_dma_restart_cb(), and the special case for
   virtio_blk_data_plane_start() is no longer necessary.
2. Only blk_drain() waits for the in-flight counter that
   virtio_blk_dma_restart_cb() raises via blk_inc_in_flight() to be
   decremented. The bdrv_drain() family of functions does not wait for
   BlockBackend's in_flight counter to reach zero.
   virtio_blk_data_plane_stop() relies on blk_set_aio_context()'s implicit
   drain, but that is a bdrv_drain(), not a blk_drain(). Note that
   virtio_blk_reset() already correctly relies on blk_drain(). If
   virtio_blk_data_plane_stop() switches to blk_drain(), we can properly
   wait for pending virtio_blk_dma_restart_bh() calls (see the sketch
   after this list).
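The fix builds on a common QEMU pattern: raise the BlockBackend in-flight
counter before scheduling a oneshot bottom half, and drop it when the BH
runs, so that blk_drain() waits for the BH to complete. The sketch below
shows the pattern in isolation; blk_inc_in_flight(), blk_dec_in_flight(),
aio_bh_schedule_oneshot(), and blk_get_aio_context() are real QEMU APIs,
while the my_restart_* names and the standalone shape are illustrative
assumptions, not code from this patch:

#include "qemu/osdep.h"
#include "block/aio.h"
#include "sysemu/block-backend.h"
#include "sysemu/runstate.h"

/* Runs in blk's AioContext; paired with the inc in my_restart_cb() */
static void my_restart_bh(void *opaque)
{
    BlockBackend *blk = opaque;

    /* ... requeue and resubmit the device's pending requests here ... */

    /* Drop the counter so a pending blk_drain() can complete */
    blk_dec_in_flight(blk);
}

/* vm change state handler: defer the actual work to a oneshot BH */
static void my_restart_cb(void *opaque, bool running, RunState state)
{
    BlockBackend *blk = opaque;

    if (!running) {
        return;
    }

    /*
     * Raise the counter before scheduling the BH: blk_drain() waits for
     * BlockBackend's in_flight counter to reach zero, whereas bdrv_drain()
     * only tracks BlockDriverState requests and would not wait for the BH.
     */
    blk_inc_in_flight(blk);
    aio_bh_schedule_oneshot(blk_get_aio_context(blk), my_restart_bh, blk);
}

With this shape, a later blk_drain(blk) cannot return until my_restart_bh()
has run and dropped the counter, which is exactly the property
virtio_blk_data_plane_stop() needs.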
Once these issues are addressed, the code becomes simpler. This change
prepares for supporting multiple IOThreads in virtio-blk, where the
multi-threading behavior needs to be cleaned up.
I ran the reproducer from commit 49b44549ace7 ("virtio-blk: On restart,
process queued requests in the proper context") to check that there is
no regression.
Cc: Sergio Lopez <slp@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Message-id: 20221102182337.252202-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
 hw/block/dataplane/virtio-blk.c | 17
 hw/block/virtio-blk.c           | 44
 include/hw/virtio/virtio-blk.h  |  2
 3 files changed, 25 insertions(+), 38 deletions(-)

diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 26f965cabc..b28d81737e 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -237,9 +237,6 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
         goto fail_aio_context;
     }
 
-    /* Process queued requests before the ones in vring */
-    virtio_blk_process_queued_requests(vblk, false);
-
     /* Kick right away to begin processing requests already in vring */
     for (i = 0; i < nvqs; i++) {
         VirtQueue *vq = virtio_get_queue(s->vdev, i);
@@ -272,11 +269,6 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
   fail_host_notifiers:
     k->set_guest_notifiers(qbus->parent, nvqs, false);
   fail_guest_notifiers:
-    /*
-     * If we failed to set up the guest notifiers queued requests will be
-     * processed on the main context.
-     */
-    virtio_blk_process_queued_requests(vblk, false);
     vblk->dataplane_disabled = true;
     s->starting = false;
     vblk->dataplane_started = true;
@@ -325,8 +317,13 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
     aio_context_acquire(s->ctx);
     aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
 
-    /* Drain and try to switch bs back to the QEMU main loop. If other users
-     * keep the BlockBackend in the iothread, that's ok */
+    /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
+    blk_drain(s->conf->conf.blk);
+
+    /*
+     * Try to switch bs back to the QEMU main loop. If other users keep the
+     * BlockBackend in the iothread, that's ok
+     */
     blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL);
 
     aio_context_release(s->ctx);
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index f717550fdc..1762517878 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -806,8 +806,10 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
     virtio_blk_handle_vq(s, vq);
 }
 
-void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh)
+static void virtio_blk_dma_restart_bh(void *opaque)
 {
+    VirtIOBlock *s = opaque;
+
     VirtIOBlockReq *req = s->rq;
     MultiReqBuffer mrb = {};
 
@@ -834,43 +836,27 @@ void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh)
     if (mrb.num_reqs) {
         virtio_blk_submit_multireq(s, &mrb);
     }
-    if (is_bh) {
-        blk_dec_in_flight(s->conf.conf.blk);
-    }
-    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
-}
-
-static void virtio_blk_dma_restart_bh(void *opaque)
-{
-    VirtIOBlock *s = opaque;
 
-    qemu_bh_delete(s->bh);
-    s->bh = NULL;
+    /* Paired with inc in virtio_blk_dma_restart_cb() */
+    blk_dec_in_flight(s->conf.conf.blk);
 
-    virtio_blk_process_queued_requests(s, true);
+    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
 }
 
 static void virtio_blk_dma_restart_cb(void *opaque, bool running,
                                       RunState state)
 {
     VirtIOBlock *s = opaque;
-    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
-    VirtioBusState *bus = VIRTIO_BUS(qbus);
 
     if (!running) {
         return;
     }
 
-    /*
-     * If ioeventfd is enabled, don't schedule the BH here as queued
-     * requests will be processed while starting the data plane.
-     */
-    if (!s->bh && !virtio_bus_ioeventfd_enabled(bus)) {
-        s->bh = aio_bh_new(blk_get_aio_context(s->conf.conf.blk),
-                           virtio_blk_dma_restart_bh, s);
-        blk_inc_in_flight(s->conf.conf.blk);
-        qemu_bh_schedule(s->bh);
-    }
+    /* Paired with dec in virtio_blk_dma_restart_bh() */
+    blk_inc_in_flight(s->conf.conf.blk);
+
+    aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.conf.blk),
+                            virtio_blk_dma_restart_bh, s);
 }
 
 static void virtio_blk_reset(VirtIODevice *vdev)
@@ -1213,7 +1199,13 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
         return;
     }
 
-    s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
+    /*
+     * This must be after virtio_init() so virtio_blk_dma_restart_cb() gets
+     * called after ->start_ioeventfd() has already set blk's AioContext.
+     */
+    s->change =
+        qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s);
+
     blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
     blk_set_dev_ops(s->blk, &virtio_block_ops, s);
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index 7f589b4146..dafec432ce 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -55,7 +55,6 @@ struct VirtIOBlock {
     VirtIODevice parent_obj;
     BlockBackend *blk;
     void *rq;
-    QEMUBH *bh;
     VirtIOBlkConf conf;
     unsigned short sector_mask;
     bool original_wce;
@@ -93,6 +92,5 @@ typedef struct MultiReqBuffer {
 } MultiReqBuffer;
 
 void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
-void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh);
 
 #endif
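
Seen from the realize side, the registration half of the pattern (issue 1
above) reduces to the following sketch. qdev_add_vm_change_state_handler()
is the real API; MyDevice, my_realize(), and the empty callback are
hypothetical stand-ins, and the ordering comment paraphrases this commit's
reasoning rather than exhaustive API documentation:

#include "qemu/osdep.h"
#include "hw/qdev-core.h"
#include "qapi/error.h"
#include "sysemu/runstate.h"

/* Hypothetical device: only the change state entry matters here */
typedef struct MyDevice {
    DeviceState parent_obj;
    VMChangeStateEntry *change;
} MyDevice;

static void my_restart_cb(void *opaque, bool running, RunState state)
{
    /* resume-time work goes here; see the earlier sketch */
}

static void my_realize(DeviceState *dev, Error **errp)
{
    MyDevice *s = (MyDevice *)dev; /* a real device would use its QOM cast */

    /*
     * The qdev variant ties the handler to the device, so its order
     * relative to other handlers registered for the same device (such as
     * the one virtio_init() installs) is well-defined; plain
     * qemu_add_vm_change_state_handler() gives no such guarantee.
     */
    s->change = qdev_add_vm_change_state_handler(dev, my_restart_cb, s);
}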