Diffstat (limited to 'hw')
-rw-r--r--  hw/block/virtio-blk.c          226
-rw-r--r--  hw/scsi/scsi-bus.c              63
-rw-r--r--  hw/scsi/virtio-scsi.c            7
-rw-r--r--  hw/usb/dev-storage-classic.c     5
-rw-r--r--  hw/virtio/virtio.c              42
5 files changed, 209 insertions(+), 134 deletions(-)
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 227d83569f..738cb2ac36 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -37,6 +37,8 @@
#include "hw/virtio/virtio-blk-common.h"
#include "qemu/coroutine.h"
+static void virtio_blk_ioeventfd_attach(VirtIOBlock *s);
+
static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
VirtIOBlockReq *req)
{
@@ -64,7 +66,7 @@ static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
iov_discard_undo(&req->inhdr_undo);
iov_discard_undo(&req->outhdr_undo);
virtqueue_push(req->vq, &req->elem, req->in_len);
- if (s->ioeventfd_started && !s->ioeventfd_disabled) {
+ if (qemu_in_iothread()) {
virtio_notify_irqfd(vdev, req->vq);
} else {
virtio_notify(vdev, req->vq);
@@ -661,6 +663,9 @@ static void virtio_blk_zone_report_complete(void *opaque, int ret)
int64_t zrp_size, n, j = 0;
int64_t nz = data->zone_report_data.nr_zones;
int8_t err_status = VIRTIO_BLK_S_OK;
+ struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) {
+ .nr_zones = cpu_to_le64(nz),
+ };
trace_virtio_blk_zone_report_complete(vdev, req, nz, ret);
if (ret) {
@@ -668,9 +673,6 @@ static void virtio_blk_zone_report_complete(void *opaque, int ret)
goto out;
}
- struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) {
- .nr_zones = cpu_to_le64(nz),
- };
zrp_size = sizeof(struct virtio_blk_zone_report)
+ sizeof(struct virtio_blk_zone_descriptor) * nz;
n = iov_from_buf(in_iov, in_num, 0, &zrp_hdr, sizeof(zrp_hdr));
@@ -898,13 +900,14 @@ static int virtio_blk_handle_zone_append(VirtIOBlockReq *req,
int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
int64_t len = iov_size(out_iov, out_num);
+ ZoneCmdData *data;
trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS);
if (!check_zoned_request(s, offset, len, true, &err_status)) {
goto out;
}
- ZoneCmdData *data = g_malloc(sizeof(ZoneCmdData));
+ data = g_malloc(sizeof(ZoneCmdData));
data->req = req;
data->in_iov = in_iov;
data->in_num = in_num;
@@ -1191,14 +1194,15 @@ static void virtio_blk_dma_restart_cb(void *opaque, bool running,
{
VirtIOBlock *s = opaque;
uint16_t num_queues = s->conf.num_queues;
+ g_autofree VirtIOBlockReq **vq_rq = NULL;
+ VirtIOBlockReq *rq;
if (!running) {
return;
}
/* Split the device-wide s->rq request list into per-vq request lists */
- g_autofree VirtIOBlockReq **vq_rq = g_new0(VirtIOBlockReq *, num_queues);
- VirtIOBlockReq *rq;
+ vq_rq = g_new0(VirtIOBlockReq *, num_queues);
WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
rq = s->rq;
@@ -1209,6 +1213,8 @@ static void virtio_blk_dma_restart_cb(void *opaque, bool running,
VirtIOBlockReq *next = rq->next;
uint16_t idx = virtio_get_queue_index(rq->vq);
+ /* Only num_queues vqs were created so vq_rq[idx] is within bounds */
+ assert(idx < num_queues);
rq->next = vq_rq[idx];
vq_rq[idx] = rq;
rq = next;
@@ -1485,68 +1491,6 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
return 0;
}
-static bool
-validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
- uint16_t num_queues, Error **errp)
-{
- g_autofree unsigned long *vqs = bitmap_new(num_queues);
- g_autoptr(GHashTable) iothreads =
- g_hash_table_new(g_str_hash, g_str_equal);
-
- for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
- const char *name = node->value->iothread;
- uint16List *vq;
-
- if (!iothread_by_id(name)) {
- error_setg(errp, "IOThread \"%s\" object does not exist", name);
- return false;
- }
-
- if (!g_hash_table_add(iothreads, (gpointer)name)) {
- error_setg(errp,
- "duplicate IOThread name \"%s\" in iothread-vq-mapping",
- name);
- return false;
- }
-
- if (node != list) {
- if (!!node->value->vqs != !!list->value->vqs) {
- error_setg(errp, "either all items in iothread-vq-mapping "
- "must have vqs or none of them must have it");
- return false;
- }
- }
-
- for (vq = node->value->vqs; vq; vq = vq->next) {
- if (vq->value >= num_queues) {
- error_setg(errp, "vq index %u for IOThread \"%s\" must be "
- "less than num_queues %u in iothread-vq-mapping",
- vq->value, name, num_queues);
- return false;
- }
-
- if (test_and_set_bit(vq->value, vqs)) {
- error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
- "because it is already assigned", vq->value, name);
- return false;
- }
- }
- }
-
- if (list->value->vqs) {
- for (uint16_t i = 0; i < num_queues; i++) {
- if (!test_bit(i, vqs)) {
- error_setg(errp,
- "missing vq %u IOThread assignment in iothread-vq-mapping",
- i);
- return false;
- }
- }
- }
-
- return true;
-}
-
static void virtio_resize_cb(void *opaque)
{
VirtIODevice *vdev = opaque;
@@ -1613,15 +1557,95 @@ static const BlockDevOps virtio_block_ops = {
.drained_end = virtio_blk_drained_end,
};
-/* Generate vq:AioContext mappings from a validated iothread-vq-mapping list */
-static void
-apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
- AioContext **vq_aio_context, uint16_t num_queues)
+static bool
+validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
+ uint16_t num_queues, Error **errp)
+{
+ g_autofree unsigned long *vqs = bitmap_new(num_queues);
+ g_autoptr(GHashTable) iothreads =
+ g_hash_table_new(g_str_hash, g_str_equal);
+
+ for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
+ const char *name = node->value->iothread;
+ uint16List *vq;
+
+ if (!iothread_by_id(name)) {
+ error_setg(errp, "IOThread \"%s\" object does not exist", name);
+ return false;
+ }
+
+ if (!g_hash_table_add(iothreads, (gpointer)name)) {
+ error_setg(errp,
+ "duplicate IOThread name \"%s\" in iothread-vq-mapping",
+ name);
+ return false;
+ }
+
+ if (node != list) {
+ if (!!node->value->vqs != !!list->value->vqs) {
+ error_setg(errp, "either all items in iothread-vq-mapping "
+ "must have vqs or none of them must have it");
+ return false;
+ }
+ }
+
+ for (vq = node->value->vqs; vq; vq = vq->next) {
+ if (vq->value >= num_queues) {
+ error_setg(errp, "vq index %u for IOThread \"%s\" must be "
+ "less than num_queues %u in iothread-vq-mapping",
+ vq->value, name, num_queues);
+ return false;
+ }
+
+ if (test_and_set_bit(vq->value, vqs)) {
+ error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
+ "because it is already assigned", vq->value, name);
+ return false;
+ }
+ }
+ }
+
+ if (list->value->vqs) {
+ for (uint16_t i = 0; i < num_queues; i++) {
+ if (!test_bit(i, vqs)) {
+ error_setg(errp,
+ "missing vq %u IOThread assignment in iothread-vq-mapping",
+ i);
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+/**
+ * apply_iothread_vq_mapping:
+ * @iothread_vq_mapping_list: The mapping of virtqueues to IOThreads.
+ * @vq_aio_context: The array of AioContext pointers to fill in.
+ * @num_queues: The length of @vq_aio_context.
+ * @errp: If an error occurs, a pointer to the area to store the error.
+ *
+ * Fill in the AioContext for each virtqueue in the @vq_aio_context array given
+ * the iothread-vq-mapping parameter in @iothread_vq_mapping_list.
+ *
+ * Returns: %true on success, %false on failure.
+ **/
+static bool apply_iothread_vq_mapping(
+ IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
+ AioContext **vq_aio_context,
+ uint16_t num_queues,
+ Error **errp)
{
IOThreadVirtQueueMappingList *node;
size_t num_iothreads = 0;
size_t cur_iothread = 0;
+ if (!validate_iothread_vq_mapping_list(iothread_vq_mapping_list,
+ num_queues, errp)) {
+ return false;
+ }
+
for (node = iothread_vq_mapping_list; node; node = node->next) {
num_iothreads++;
}
@@ -1638,6 +1662,7 @@ apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
/* Explicit vq:IOThread assignment */
for (vq = node->value->vqs; vq; vq = vq->next) {
+ assert(vq->value < num_queues);
vq_aio_context[vq->value] = ctx;
}
} else {
@@ -1650,6 +1675,8 @@ apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
cur_iothread++;
}
+
+ return true;
}
/* Context: BQL held */
@@ -1660,6 +1687,13 @@ static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ if (conf->iothread && conf->iothread_vq_mapping_list) {
+ error_setg(errp,
+ "iothread and iothread-vq-mapping properties cannot be set "
+ "at the same time");
+ return false;
+ }
+
if (conf->iothread || conf->iothread_vq_mapping_list) {
if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
error_setg(errp,
@@ -1685,8 +1719,14 @@ static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
s->vq_aio_context = g_new(AioContext *, conf->num_queues);
if (conf->iothread_vq_mapping_list) {
- apply_vq_mapping(conf->iothread_vq_mapping_list, s->vq_aio_context,
- conf->num_queues);
+ if (!apply_iothread_vq_mapping(conf->iothread_vq_mapping_list,
+ s->vq_aio_context,
+ conf->num_queues,
+ errp)) {
+ g_free(s->vq_aio_context);
+ s->vq_aio_context = NULL;
+ return false;
+ }
} else if (conf->iothread) {
AioContext *ctx = iothread_get_aio_context(conf->iothread);
for (unsigned i = 0; i < conf->num_queues; i++) {
@@ -1790,6 +1830,7 @@ static int virtio_blk_start_ioeventfd(VirtIODevice *vdev)
* Try to change the AioContext so that block jobs and other operations can
* co-locate their activity in the same AioContext. If it fails, nevermind.
*/
+ assert(nvqs > 0); /* enforced during ->realize() */
r = blk_set_aio_context(s->conf.conf.blk, s->vq_aio_context[0],
&local_err);
if (r < 0) {
@@ -1808,17 +1849,14 @@ static int virtio_blk_start_ioeventfd(VirtIODevice *vdev)
s->ioeventfd_started = true;
smp_wmb(); /* paired with aio_notify_accept() on the read side */
- /* Get this show started by hooking up our callbacks */
- for (i = 0; i < nvqs; i++) {
- VirtQueue *vq = virtio_get_queue(vdev, i);
- AioContext *ctx = s->vq_aio_context[i];
-
- /* Kick right away to begin processing requests already in vring */
- event_notifier_set(virtio_queue_get_host_notifier(vq));
-
- if (!blk_in_drain(s->conf.conf.blk)) {
- virtio_queue_aio_attach_host_notifier(vq, ctx);
- }
+ /*
+ * Get this show started by hooking up our callbacks. If drained now,
+ * virtio_blk_drained_end() will do this later.
+ * Attaching the notifier also kicks the virtqueues, processing any requests
+ * they may already have.
+ */
+ if (!blk_in_drain(s->conf.conf.blk)) {
+ virtio_blk_ioeventfd_attach(s);
}
return 0;
@@ -1924,6 +1962,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIOBlock *s = VIRTIO_BLK(dev);
VirtIOBlkConf *conf = &s->conf;
+ BlockDriverState *bs;
Error *err = NULL;
unsigned i;
@@ -1969,7 +2008,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
return;
}
- BlockDriverState *bs = blk_bs(conf->conf.blk);
+ bs = blk_bs(conf->conf.blk);
if (bs->bl.zoned != BLK_Z_NONE) {
virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED);
if (bs->bl.zoned == BLK_Z_HM) {
@@ -1996,19 +2035,6 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
return;
}
- if (conf->iothread_vq_mapping_list) {
- if (conf->iothread) {
- error_setg(errp, "iothread and iothread-vq-mapping properties "
- "cannot be set at the same time");
- return;
- }
-
- if (!validate_iothread_vq_mapping_list(conf->iothread_vq_mapping_list,
- conf->num_queues, errp)) {
- return;
- }
- }
-
s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
s->host_features);
virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);
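The apply_iothread_vq_mapping() documentation above describes two modes: explicit per-IOThread "vqs" lists, or, when no lists are given, a round-robin distribution of virtqueues across the listed IOThreads. The body of the round-robin branch is not shown in the hunks, so the following is only a minimal standalone sketch of that distribution, assuming the stride scheme implied by the cur_iothread counter; all names here are illustrative, not QEMU API, and this is not part of the patch.

#include <stdio.h>

/* Hypothetical stand-in for vq_aio_context[]: record an IOThread index per vq */
int main(void)
{
    unsigned num_queues = 8;     /* e.g. conf->num_queues */
    unsigned num_iothreads = 3;  /* entries in iothread-vq-mapping */
    unsigned vq_to_iothread[8];

    /* Round-robin: IOThread i serves vqs i, i + num_iothreads, ... */
    for (unsigned i = 0; i < num_iothreads; i++) {
        for (unsigned q = i; q < num_queues; q += num_iothreads) {
            vq_to_iothread[q] = i;
        }
    }

    for (unsigned q = 0; q < num_queues; q++) {
        printf("vq %u -> iothread %u\n", q, vq_to_iothread[q]);
    }
    return 0;
}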
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index 0a2eb11c56..9e40b0c920 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -120,17 +120,13 @@ static void scsi_device_for_each_req_async_bh(void *opaque)
SCSIRequest *next;
/*
- * If the AioContext changed before this BH was called then reschedule into
- * the new AioContext before accessing ->requests. This can happen when
- * scsi_device_for_each_req_async() is called and then the AioContext is
- * changed before BHs are run.
+ * The BB cannot have changed contexts between this BH being scheduled and
+ * now: BBs' AioContexts, when they have a node attached, can only be
+ * changed via bdrv_try_change_aio_context(), in a drained section. While
+ * we have the in-flight counter incremented, that drain must block.
*/
ctx = blk_get_aio_context(s->conf.blk);
- if (ctx != qemu_get_current_aio_context()) {
- aio_bh_schedule_oneshot(ctx, scsi_device_for_each_req_async_bh,
- g_steal_pointer(&data));
- return;
- }
+ assert(ctx == qemu_get_current_aio_context());
QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
data->fn(req, data->fn_opaque);
@@ -138,11 +134,16 @@ static void scsi_device_for_each_req_async_bh(void *opaque)
/* Drop the reference taken by scsi_device_for_each_req_async() */
object_unref(OBJECT(s));
+
+ /* Paired with blk_inc_in_flight() in scsi_device_for_each_req_async() */
+ blk_dec_in_flight(s->conf.blk);
}
/*
* Schedule @fn() to be invoked for each enqueued request in device @s. @fn()
* runs in the AioContext that is executing the request.
+ * Keeps the BlockBackend's in-flight counter incremented until everything is
+ * done, so draining it will settle all scheduled @fn() calls.
*/
static void scsi_device_for_each_req_async(SCSIDevice *s,
void (*fn)(SCSIRequest *, void *),
@@ -163,6 +164,8 @@ static void scsi_device_for_each_req_async(SCSIDevice *s,
*/
object_ref(OBJECT(s));
+ /* Paired with blk_dec_in_flight() in scsi_device_for_each_req_async_bh() */
+ blk_inc_in_flight(s->conf.blk);
aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
scsi_device_for_each_req_async_bh,
data);
@@ -373,15 +376,13 @@ static void scsi_qdev_unrealize(DeviceState *qdev)
/* handle legacy '-drive if=scsi,...' cmd line args */
SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
- int unit, bool removable, int bootindex,
- bool share_rw,
- BlockdevOnError rerror,
- BlockdevOnError werror,
+ int unit, bool removable, BlockConf *conf,
const char *serial, Error **errp)
{
const char *driver;
char *name;
DeviceState *dev;
+ SCSIDevice *s;
DriveInfo *dinfo;
if (blk_is_sg(blk)) {
@@ -399,11 +400,10 @@ SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
object_property_add_child(OBJECT(bus), name, OBJECT(dev));
g_free(name);
+ s = SCSI_DEVICE(dev);
+ s->conf = *conf;
+
qdev_prop_set_uint32(dev, "scsi-id", unit);
- if (bootindex >= 0) {
- object_property_set_int(OBJECT(dev), "bootindex", bootindex,
- &error_abort);
- }
if (object_property_find(OBJECT(dev), "removable")) {
qdev_prop_set_bit(dev, "removable", removable);
}
@@ -414,19 +414,12 @@ SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
object_unparent(OBJECT(dev));
return NULL;
}
- if (!object_property_set_bool(OBJECT(dev), "share-rw", share_rw, errp)) {
- object_unparent(OBJECT(dev));
- return NULL;
- }
-
- qdev_prop_set_enum(dev, "rerror", rerror);
- qdev_prop_set_enum(dev, "werror", werror);
if (!qdev_realize_and_unref(dev, &bus->qbus, errp)) {
object_unparent(OBJECT(dev));
return NULL;
}
- return SCSI_DEVICE(dev);
+ return s;
}
void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
@@ -434,6 +427,12 @@ void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
Location loc;
DriveInfo *dinfo;
int unit;
+ BlockConf conf = {
+ .bootindex = -1,
+ .share_rw = false,
+ .rerror = BLOCKDEV_ON_ERROR_AUTO,
+ .werror = BLOCKDEV_ON_ERROR_AUTO,
+ };
loc_push_none(&loc);
for (unit = 0; unit <= bus->info->max_target; unit++) {
@@ -443,10 +442,7 @@ void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
}
qemu_opts_loc_restore(dinfo->opts);
scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
- unit, false, -1, false,
- BLOCKDEV_ON_ERROR_AUTO,
- BLOCKDEV_ON_ERROR_AUTO,
- NULL, &error_fatal);
+ unit, false, &conf, NULL, &error_fatal);
}
loc_pop(&loc);
}
@@ -1728,11 +1724,20 @@ static void scsi_device_purge_one_req(SCSIRequest *req, void *opaque)
scsi_req_cancel_async(req, NULL);
}
+/**
+ * Cancel all requests, and block until they are deleted.
+ */
void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
{
scsi_device_for_each_req_async(sdev, scsi_device_purge_one_req, NULL);
+ /*
+ * Await all the scsi_device_purge_one_req() calls scheduled by
+ * scsi_device_for_each_req_async(), and all I/O requests that were
+ * cancelled this way, but may still take a bit of time to settle.
+ */
blk_drain(sdev->conf.blk);
+
scsi_device_set_ua(sdev, sense);
}
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 690aceec45..9f02ceea09 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -1149,6 +1149,7 @@ static void virtio_scsi_drained_begin(SCSIBus *bus)
static void virtio_scsi_drained_end(SCSIBus *bus)
{
VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
VirtIODevice *vdev = VIRTIO_DEVICE(s);
uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED +
s->parent_obj.conf.num_queues;
@@ -1166,7 +1167,11 @@ static void virtio_scsi_drained_end(SCSIBus *bus)
for (uint32_t i = 0; i < total_queues; i++) {
VirtQueue *vq = virtio_get_queue(vdev, i);
- virtio_queue_aio_attach_host_notifier(vq, s->ctx);
+ if (vq == vs->event_vq) {
+ virtio_queue_aio_attach_host_notifier_no_poll(vq, s->ctx);
+ } else {
+ virtio_queue_aio_attach_host_notifier(vq, s->ctx);
+ }
}
}
diff --git a/hw/usb/dev-storage-classic.c b/hw/usb/dev-storage-classic.c
index 84d19752b5..50a3ad6285 100644
--- a/hw/usb/dev-storage-classic.c
+++ b/hw/usb/dev-storage-classic.c
@@ -67,10 +67,7 @@ static void usb_msd_storage_realize(USBDevice *dev, Error **errp)
scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev),
&usb_msd_scsi_info_storage);
scsi_dev = scsi_bus_legacy_add_drive(&s->bus, blk, 0, !!s->removable,
- s->conf.bootindex, s->conf.share_rw,
- s->conf.rerror, s->conf.werror,
- dev->serial,
- errp);
+ &s->conf, dev->serial, errp);
blk_unref(blk);
if (!scsi_dev) {
return;
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 7549094154..d229755eae 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -3556,6 +3556,17 @@ static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
{
+ /*
+ * virtio_queue_aio_detach_host_notifier() can leave notifications disabled.
+ * Re-enable them. (And if detach has not been used before, notifications
+ * being enabled is still the default state while a notifier is attached;
+ * see virtio_queue_host_notifier_aio_poll_end(), which will always leave
+ * notifications enabled once the polling section is left.)
+ */
+ if (!virtio_queue_get_notification(vq)) {
+ virtio_queue_set_notification(vq, 1);
+ }
+
aio_set_event_notifier(ctx, &vq->host_notifier,
virtio_queue_host_notifier_read,
virtio_queue_host_notifier_aio_poll,
@@ -3563,6 +3574,13 @@ void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
aio_set_event_notifier_poll(ctx, &vq->host_notifier,
virtio_queue_host_notifier_aio_poll_begin,
virtio_queue_host_notifier_aio_poll_end);
+
+ /*
+ * We will have ignored notifications about new requests from the guest
+ * while no notifiers were attached, so "kick" the virt queue to process
+ * those requests now.
+ */
+ event_notifier_set(&vq->host_notifier);
}
/*
@@ -3573,14 +3591,38 @@ void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
*/
void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx)
{
+ /* See virtio_queue_aio_attach_host_notifier() */
+ if (!virtio_queue_get_notification(vq)) {
+ virtio_queue_set_notification(vq, 1);
+ }
+
aio_set_event_notifier(ctx, &vq->host_notifier,
virtio_queue_host_notifier_read,
NULL, NULL);
+
+ /*
+ * See virtio_queue_aio_attach_host_notifier().
+ * Note that this may be unnecessary for the type of virtqueues this
+ * function is used for. Still, it will not hurt to have a quick look into
+ * whether we can/should process any of the virtqueue elements.
+ */
+ event_notifier_set(&vq->host_notifier);
}
void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
{
aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL);
+
+ /*
+ * aio_set_event_notifier_poll() does not guarantee whether io_poll_end()
+ * will run after io_poll_begin(), so by removing the notifier, we do not
+ * know whether virtio_queue_host_notifier_aio_poll_end() has run after a
+ * previous virtio_queue_host_notifier_aio_poll_begin(), i.e. whether
+ * notifications are enabled or disabled. It does not really matter anyway;
+ * we just removed the notifier, so we do not care about notifications until
+ * we potentially re-attach it. The attach_host_notifier functions will
+ * ensure that notifications are enabled again when they are needed.
+ */
}
void virtio_queue_host_notifier_read(EventNotifier *n)
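A condensed illustration, not part of the patch, of the invariant the virtio.c comments establish: after detaching, the notification state is unknown, so re-attaching both re-enables notifications and self-kicks the queue so requests that arrived while detached are not stranded. The example_* wrappers are hypothetical callers; the virtio_queue_aio_* functions are the ones modified above.

static void example_drained_begin(VirtQueue *vq, AioContext *ctx)
{
    /* May leave guest notifications disabled; that is fine while detached */
    virtio_queue_aio_detach_host_notifier(vq, ctx);
}

static void example_drained_end(VirtQueue *vq, AioContext *ctx)
{
    /* Re-enables notifications and kicks the host notifier internally */
    virtio_queue_aio_attach_host_notifier(vq, ctx);
}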