author    | Stefan Hajnoczi <stefanha@redhat.com> | 2014-03-03 11:30:08 +0100
committer | Stefan Hajnoczi <stefanha@redhat.com> | 2014-03-13 14:42:24 +0100
commit    | 48ff269272f18d2b8fa53cb08365df417588f585 (patch)
tree      | 45fb082c737ed43501322e2fd042d5a03d655785 /hw
parent    | 6e4a876b433f78f72724f45ae3f9e26596da1b4d (diff)
dataplane: replace internal thread with IOThread
Today virtio-blk dataplane uses a 1:1 device-per-thread model. Now that
IOThreads have been introduced we can generalize this to an N:M
devices-per-threads model.
This patch drops thread code from dataplane in favor of running inside
an IOThread AioContext.
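
To make the N:M idea concrete, here is a minimal sketch (not part of this patch) of how a dataplane device binds itself to an IOThread's AioContext. The helper name attach_to_iothread is hypothetical, but the calls (object_ref, iothread_get_aio_context, aio_context_acquire/release and the 2014-era three-argument aio_set_event_notifier) are the ones used in the diff below, and any number of devices can repeat this against the same IOThread:

/* Hypothetical helper, written against the fields of struct VirtIOBlockDataPlane
 * and the handle_notify/handle_io callbacks in hw/block/dataplane/virtio-blk.c.
 */
static void attach_to_iothread(VirtIOBlockDataPlane *s, IOThread *iothread)
{
    s->iothread = iothread;
    object_ref(OBJECT(s->iothread));            /* keep the IOThread alive */
    s->ctx = iothread_get_aio_context(s->iothread);

    /* The callbacks now run in the IOThread's event loop rather than in a
     * dedicated per-device thread.
     */
    aio_context_acquire(s->ctx);
    aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify);
    aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io);
    aio_context_release(s->ctx);
}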
As a bonus we solve the case where a guest keeps submitting I/O requests
while dataplane is trying to stop. Previously the dataplane thread
would continue to process requests until the guest gave it a break.
Now we can shut down in bounded time thanks to
aio_context_acquire/release.
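
As a sketch of that bounded-time stop path (again not part of the patch itself; the helper name is made up, and the calls mirror virtio_blk_data_plane_stop below): the main thread takes the AioContext lock so the IOThread cannot keep running handlers, detaches the guest's doorbell notifier so no new requests arrive, and then drains only the requests already in flight:

/* Hypothetical drain helper; s->num_reqs, the notifiers and the handlers are
 * the fields used by hw/block/dataplane/virtio-blk.c in this patch.
 */
static void drain_and_detach(VirtIOBlockDataPlane *s)
{
    aio_context_acquire(s->ctx);

    /* Stop listening to the guest: no new requests can be picked up */
    aio_set_event_notifier(s->ctx, &s->host_notifier, NULL);

    /* Only requests already submitted remain, so this loop is bounded */
    while (s->num_reqs > 0) {
        aio_poll(s->ctx, true);
    }

    /* Safe to drop the completion notifier: nothing is pending */
    aio_set_event_notifier(s->ctx, &s->io_notifier, NULL);

    aio_context_release(s->ctx);
}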
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Diffstat (limited to 'hw')
-rw-r--r-- | hw/block/dataplane/virtio-blk.c | 96
1 file changed, 54 insertions(+), 42 deletions(-)
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index d1c7ad4574..a5afc217c0 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -23,6 +23,7 @@
 #include "virtio-blk.h"
 #include "block/aio.h"
 #include "hw/virtio/virtio-bus.h"
+#include "monitor/monitor.h" /* for object_add() */
 
 enum {
     SEG_MAX = 126,                  /* maximum number of I/O segments */
@@ -44,8 +45,6 @@ struct VirtIOBlockDataPlane {
     bool started;
     bool starting;
     bool stopping;
-    QEMUBH *start_bh;
-    QemuThread thread;
 
     VirtIOBlkConf *blk;
     int fd;                         /* image file descriptor */
@@ -59,12 +58,14 @@ struct VirtIOBlockDataPlane {
      * (because you don't own the file descriptor or handle; you just
      * use it).
      */
+    IOThread *iothread;
+    bool internal_iothread;
     AioContext *ctx;
     EventNotifier io_notifier;      /* Linux AIO completion */
     EventNotifier host_notifier;    /* doorbell */
 
     IOQueue ioqueue;                /* Linux AIO queue (should really be per
-                                       dataplane thread) */
+                                       IOThread) */
     VirtIOBlockRequest requests[REQ_MAX]; /* pool of requests, managed by the
                                              queue */
 
@@ -342,26 +343,7 @@ static void handle_io(EventNotifier *e)
     }
 }
 
-static void *data_plane_thread(void *opaque)
-{
-    VirtIOBlockDataPlane *s = opaque;
-
-    while (!s->stopping || s->num_reqs > 0) {
-        aio_poll(s->ctx, true);
-    }
-    return NULL;
-}
-
-static void start_data_plane_bh(void *opaque)
-{
-    VirtIOBlockDataPlane *s = opaque;
-
-    qemu_bh_delete(s->start_bh);
-    s->start_bh = NULL;
-    qemu_thread_create(&s->thread, "data_plane", data_plane_thread,
-                       s, QEMU_THREAD_JOINABLE);
-}
-
+/* Context: QEMU global mutex held */
 void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
                                   VirtIOBlockDataPlane **dataplane,
                                   Error **errp)
@@ -408,12 +390,33 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
     s->fd = fd;
     s->blk = blk;
 
+    if (blk->iothread) {
+        s->internal_iothread = false;
+        s->iothread = blk->iothread;
+        object_ref(OBJECT(s->iothread));
+    } else {
+        /* Create per-device IOThread if none specified */
+        Error *local_err = NULL;
+
+        s->internal_iothread = true;
+        object_add(TYPE_IOTHREAD, vdev->name, NULL, NULL, &local_err);
+        if (error_is_set(&local_err)) {
+            error_propagate(errp, local_err);
+            g_free(s);
+            return;
+        }
+        s->iothread = iothread_find(vdev->name);
+        assert(s->iothread);
+    }
+    s->ctx = iothread_get_aio_context(s->iothread);
+
     /* Prevent block operations that conflict with data plane thread */
     bdrv_set_in_use(blk->conf.bs, 1);
 
     *dataplane = s;
 }
 
+/* Context: QEMU global mutex held */
 void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
 {
     if (!s) {
@@ -422,9 +425,14 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
 
     virtio_blk_data_plane_stop(s);
     bdrv_set_in_use(s->blk->conf.bs, 0);
+    object_unref(OBJECT(s->iothread));
+    if (s->internal_iothread) {
+        object_unparent(OBJECT(s->iothread));
+    }
     g_free(s);
 }
 
+/* Context: QEMU global mutex held */
 void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
@@ -448,8 +456,6 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
         return;
     }
 
-    s->ctx = aio_context_new();
-
     /* Set up guest notifier (irq) */
     if (k->set_guest_notifiers(qbus->parent, 1, true) != 0) {
         fprintf(stderr, "virtio-blk failed to set guest notifier, "
@@ -464,7 +470,6 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
         exit(1);
     }
     s->host_notifier = *virtio_queue_get_host_notifier(vq);
-    aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify);
 
     /* Set up ioqueue */
     ioq_init(&s->ioqueue, s->fd, REQ_MAX);
@@ -472,7 +477,6 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
         ioq_put_iocb(&s->ioqueue, &s->requests[i].iocb);
     }
     s->io_notifier = *ioq_get_notifier(&s->ioqueue);
-    aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io);
 
     s->starting = false;
     s->started = true;
@@ -481,11 +485,14 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
     /* Kick right away to begin processing requests already in vring */
     event_notifier_set(virtio_queue_get_host_notifier(vq));
 
-    /* Spawn thread in BH so it inherits iothread cpusets */
-    s->start_bh = qemu_bh_new(start_data_plane_bh, s);
-    qemu_bh_schedule(s->start_bh);
+    /* Get this show started by hooking up our callbacks */
+    aio_context_acquire(s->ctx);
+    aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify);
+    aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io);
+    aio_context_release(s->ctx);
 }
 
+/* Context: QEMU global mutex held */
 void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
@@ -496,27 +503,32 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
     s->stopping = true;
     trace_virtio_blk_data_plane_stop(s);
 
-    /* Stop thread or cancel pending thread creation BH */
-    if (s->start_bh) {
-        qemu_bh_delete(s->start_bh);
-        s->start_bh = NULL;
-    } else {
-        aio_notify(s->ctx);
-        qemu_thread_join(&s->thread);
+    aio_context_acquire(s->ctx);
+
+    /* Stop notifications for new requests from guest */
+    aio_set_event_notifier(s->ctx, &s->host_notifier, NULL);
+
+    /* Complete pending requests */
+    while (s->num_reqs > 0) {
+        aio_poll(s->ctx, true);
     }
 
+    /* Stop ioq callbacks (there are no pending requests left) */
     aio_set_event_notifier(s->ctx, &s->io_notifier, NULL);
-    ioq_cleanup(&s->ioqueue);
 
-    aio_set_event_notifier(s->ctx, &s->host_notifier, NULL);
-    k->set_host_notifier(qbus->parent, 0, false);
+    aio_context_release(s->ctx);
 
-    aio_context_unref(s->ctx);
+    /* Sync vring state back to virtqueue so that non-dataplane request
+     * processing can continue when we disable the host notifier below.
+     */
+    vring_teardown(&s->vring, s->vdev, 0);
+
+    ioq_cleanup(&s->ioqueue);
+    k->set_host_notifier(qbus->parent, 0, false);
 
     /* Clean up guest notifier (irq) */
     k->set_guest_notifiers(qbus->parent, 1, false);
 
-    vring_teardown(&s->vring, s->vdev, 0);
     s->started = false;
     s->stopping = false;
 }
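
For orientation, a hypothetical caller sketch of how the four entry points above fit together; my_virtio_blk_setup_teardown and its arguments are illustrative only, but the call order and the requirement that the QEMU global mutex is held match the /* Context: QEMU global mutex held */ comments added by this patch:

/* Hypothetical lifecycle sketch; not code from QEMU. */
static void my_virtio_blk_setup_teardown(VirtIODevice *vdev, VirtIOBlkConf *blk,
                                         Error **errp)
{
    VirtIOBlockDataPlane *dataplane = NULL;

    virtio_blk_data_plane_create(vdev, blk, &dataplane, errp);
    if (!dataplane) {
        return;                                  /* not created (disabled or error) */
    }

    virtio_blk_data_plane_start(dataplane);      /* hook notifiers into the IOThread */
    /* ... guest I/O is processed in the IOThread's AioContext ... */
    virtio_blk_data_plane_stop(dataplane);       /* drain and detach in bounded time */
    virtio_blk_data_plane_destroy(dataplane);    /* drop/unparent the IOThread */
}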