author     Peter Maydell <peter.maydell@linaro.org>  2022-01-14 10:43:32 +0000
committer  Peter Maydell <peter.maydell@linaro.org>  2022-01-14 10:43:32 +0000
commit     1001c9d9c0c6c2ef0ae6cc27b8555e1a4e93dc30 (patch)
tree       8810cfe592853a589db2b0f91f211e34df10620b /block
parent     67b6526cf042f22521feff5ea521a05d3dd2bf8f (diff)
parent     db608fb78444c58896db69495729e4458eeaace1 (diff)
Merge remote-tracking branch 'remotes/stefanha-gitlab/tags/block-pull-request' into staging
Pull request

# gpg: Signature made Wed 12 Jan 2022 17:13:54 GMT
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha-gitlab/tags/block-pull-request:
  virtio: unify dataplane and non-dataplane ->handle_output()
  virtio: use ->handle_output() instead of ->handle_aio_output()
  virtio-scsi: prepare virtio_scsi_handle_cmd for dataplane
  virtio-blk: drop unused virtio_blk_handle_vq() return value
  virtio: get rid of VirtIOHandleAIOOutput
  aio-posix: split poll check from ready handler

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
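All of the block/ changes below adapt callers to the last patch in that list,
"aio-posix: split poll check from ready handler": the poll callback, which
used to both detect completed work and process it, is split into a
side-effect-free check plus a separate ready handler that the event loop
invokes only when the check returns true. A sketch of the resulting
prototypes, as implied by the updated call sites in this diff (typedef and
parameter names follow QEMU's include/block/aio.h and may differ across
versions):

    /*
     * io_poll is now check-only: it reports whether work is pending but
     * must not process it. io_poll_ready is the new parameter: it runs
     * only after io_poll has returned true, and does the actual work.
     */
    void aio_set_fd_handler(AioContext *ctx, int fd,
                            bool is_external,
                            IOHandler *io_read,
                            IOHandler *io_write,
                            AioPollFn *io_poll,          /* check only   */
                            IOHandler *io_poll_ready,    /* new argument */
                            void *opaque);

    void aio_set_event_notifier(AioContext *ctx,
                                EventNotifier *notifier,
                                bool is_external,
                                EventNotifierHandler *io_read,
                                AioPollFn *io_poll,
                                EventNotifierHandler *io_poll_ready);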
Diffstat (limited to 'block')
-rw-r--r--   block/curl.c          11
-rw-r--r--   block/export/fuse.c    4
-rw-r--r--   block/io_uring.c      19
-rw-r--r--   block/iscsi.c          4
-rw-r--r--   block/linux-aio.c     16
-rw-r--r--   block/nfs.c            6
-rw-r--r--   block/nvme.c          51
-rw-r--r--   block/ssh.c            4
-rw-r--r--   block/win32-aio.c      4
9 files changed, 71 insertions(+), 48 deletions(-)
diff --git a/block/curl.c b/block/curl.c
index 4a8ae2b269..6a6cd72975 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -125,7 +125,7 @@ static gboolean curl_drop_socket(void *key, void *value, void *opaque)
BDRVCURLState *s = socket->s;
aio_set_fd_handler(s->aio_context, socket->fd, false,
- NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL, NULL);
return true;
}
@@ -173,19 +173,20 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
switch (action) {
case CURL_POLL_IN:
aio_set_fd_handler(s->aio_context, fd, false,
- curl_multi_do, NULL, NULL, socket);
+ curl_multi_do, NULL, NULL, NULL, socket);
break;
case CURL_POLL_OUT:
aio_set_fd_handler(s->aio_context, fd, false,
- NULL, curl_multi_do, NULL, socket);
+ NULL, curl_multi_do, NULL, NULL, socket);
break;
case CURL_POLL_INOUT:
aio_set_fd_handler(s->aio_context, fd, false,
- curl_multi_do, curl_multi_do, NULL, socket);
+ curl_multi_do, curl_multi_do,
+ NULL, NULL, socket);
break;
case CURL_POLL_REMOVE:
aio_set_fd_handler(s->aio_context, fd, false,
- NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL, NULL);
break;
}
diff --git a/block/export/fuse.c b/block/export/fuse.c
index 823c126d23..6710d8aed8 100644
--- a/block/export/fuse.c
+++ b/block/export/fuse.c
@@ -223,7 +223,7 @@ static int setup_fuse_export(FuseExport *exp, const char *mountpoint,
aio_set_fd_handler(exp->common.ctx,
fuse_session_fd(exp->fuse_session), true,
- read_from_fuse_export, NULL, NULL, exp);
+ read_from_fuse_export, NULL, NULL, NULL, exp);
exp->fd_handler_set_up = true;
return 0;
@@ -267,7 +267,7 @@ static void fuse_export_shutdown(BlockExport *blk_exp)
if (exp->fd_handler_set_up) {
aio_set_fd_handler(exp->common.ctx,
fuse_session_fd(exp->fuse_session), true,
- NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL, NULL);
exp->fd_handler_set_up = false;
}
}
diff --git a/block/io_uring.c b/block/io_uring.c
index dfa475cc87..782afdb433 100644
--- a/block/io_uring.c
+++ b/block/io_uring.c
@@ -292,12 +292,14 @@ static bool qemu_luring_poll_cb(void *opaque)
{
LuringState *s = opaque;
- if (io_uring_cq_ready(&s->ring)) {
- luring_process_completions_and_submit(s);
- return true;
- }
+ return io_uring_cq_ready(&s->ring);
+}
+
+static void qemu_luring_poll_ready(void *opaque)
+{
+ LuringState *s = opaque;
- return false;
+ luring_process_completions_and_submit(s);
}
static void ioq_init(LuringQueue *io_q)
@@ -402,8 +404,8 @@ int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd,
void luring_detach_aio_context(LuringState *s, AioContext *old_context)
{
- aio_set_fd_handler(old_context, s->ring.ring_fd, false, NULL, NULL, NULL,
- s);
+ aio_set_fd_handler(old_context, s->ring.ring_fd, false,
+ NULL, NULL, NULL, NULL, s);
qemu_bh_delete(s->completion_bh);
s->aio_context = NULL;
}
@@ -413,7 +415,8 @@ void luring_attach_aio_context(LuringState *s, AioContext *new_context)
s->aio_context = new_context;
s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s);
aio_set_fd_handler(s->aio_context, s->ring.ring_fd, false,
- qemu_luring_completion_cb, NULL, qemu_luring_poll_cb, s);
+ qemu_luring_completion_cb, NULL,
+ qemu_luring_poll_cb, qemu_luring_poll_ready, s);
}
LuringState *luring_init(Error **errp)
diff --git a/block/iscsi.c b/block/iscsi.c
index 57aa07a40d..51f2a5eeaa 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -363,7 +363,7 @@ iscsi_set_events(IscsiLun *iscsilun)
false,
(ev & POLLIN) ? iscsi_process_read : NULL,
(ev & POLLOUT) ? iscsi_process_write : NULL,
- NULL,
+ NULL, NULL,
iscsilun);
iscsilun->events = ev;
}
@@ -1534,7 +1534,7 @@ static void iscsi_detach_aio_context(BlockDriverState *bs)
IscsiLun *iscsilun = bs->opaque;
aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi),
- false, NULL, NULL, NULL, NULL);
+ false, NULL, NULL, NULL, NULL, NULL);
iscsilun->events = 0;
if (iscsilun->nop_timer) {
diff --git a/block/linux-aio.c b/block/linux-aio.c
index f53ae72e21..4c423fcccf 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -263,12 +263,15 @@ static bool qemu_laio_poll_cb(void *opaque)
EventNotifier *e = opaque;
LinuxAioState *s = container_of(e, LinuxAioState, e);
struct io_event *events;
- if (!io_getevents_peek(s->ctx, &events)) {
- return false;
- }
+ return io_getevents_peek(s->ctx, &events);
+}
+
+static void qemu_laio_poll_ready(EventNotifier *opaque)
+{
+ EventNotifier *e = opaque;
+ LinuxAioState *s = container_of(e, LinuxAioState, e);
qemu_laio_process_completions_and_submit(s);
- return true;
}
static void ioq_init(LaioQueue *io_q)
@@ -427,7 +430,7 @@ int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
- aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
+ aio_set_event_notifier(old_context, &s->e, false, NULL, NULL, NULL);
qemu_bh_delete(s->completion_bh);
s->aio_context = NULL;
}
@@ -438,7 +441,8 @@ void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
aio_set_event_notifier(new_context, &s->e, false,
qemu_laio_completion_cb,
- qemu_laio_poll_cb);
+ qemu_laio_poll_cb,
+ qemu_laio_poll_ready);
}
LinuxAioState *laio_init(Error **errp)
diff --git a/block/nfs.c b/block/nfs.c
index 577aea1d22..444c40b458 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -197,7 +197,7 @@ static void nfs_set_events(NFSClient *client)
false,
(ev & POLLIN) ? nfs_process_read : NULL,
(ev & POLLOUT) ? nfs_process_write : NULL,
- NULL, client);
+ NULL, NULL, client);
}
client->events = ev;
@@ -372,7 +372,7 @@ static void nfs_detach_aio_context(BlockDriverState *bs)
NFSClient *client = bs->opaque;
aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
- false, NULL, NULL, NULL, NULL);
+ false, NULL, NULL, NULL, NULL, NULL);
client->events = 0;
}
@@ -390,7 +390,7 @@ static void nfs_client_close(NFSClient *client)
if (client->context) {
qemu_mutex_lock(&client->mutex);
aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
- false, NULL, NULL, NULL, NULL);
+ false, NULL, NULL, NULL, NULL, NULL);
qemu_mutex_unlock(&client->mutex);
if (client->fh) {
nfs_close(client->context, client->fh);
diff --git a/block/nvme.c b/block/nvme.c
index fa360b9b3c..dd20de3865 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -605,10 +605,8 @@ out:
return ret;
}
-static bool nvme_poll_queue(NVMeQueuePair *q)
+static void nvme_poll_queue(NVMeQueuePair *q)
{
- bool progress = false;
-
const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];
@@ -619,30 +617,23 @@ static bool nvme_poll_queue(NVMeQueuePair *q)
* cannot race with itself.
*/
if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
- return false;
+ return;
}
qemu_mutex_lock(&q->lock);
while (nvme_process_completion(q)) {
/* Keep polling */
- progress = true;
}
qemu_mutex_unlock(&q->lock);
-
- return progress;
}
-static bool nvme_poll_queues(BDRVNVMeState *s)
+static void nvme_poll_queues(BDRVNVMeState *s)
{
- bool progress = false;
int i;
for (i = 0; i < s->queue_count; i++) {
- if (nvme_poll_queue(s->queues[i])) {
- progress = true;
- }
+ nvme_poll_queue(s->queues[i]);
}
- return progress;
}
static void nvme_handle_event(EventNotifier *n)
@@ -703,8 +694,30 @@ static bool nvme_poll_cb(void *opaque)
EventNotifier *e = opaque;
BDRVNVMeState *s = container_of(e, BDRVNVMeState,
irq_notifier[MSIX_SHARED_IRQ_IDX]);
+ int i;
- return nvme_poll_queues(s);
+ for (i = 0; i < s->queue_count; i++) {
+ NVMeQueuePair *q = s->queues[i];
+ const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
+ NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];
+
+ /*
+ * q->lock isn't needed because nvme_process_completion() only runs in
+ * the event loop thread and cannot race with itself.
+ */
+ if ((le16_to_cpu(cqe->status) & 0x1) != q->cq_phase) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static void nvme_poll_ready(EventNotifier *e)
+{
+ BDRVNVMeState *s = container_of(e, BDRVNVMeState,
+ irq_notifier[MSIX_SHARED_IRQ_IDX]);
+
+ nvme_poll_queues(s);
}
static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
@@ -839,7 +852,8 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
}
aio_set_event_notifier(bdrv_get_aio_context(bs),
&s->irq_notifier[MSIX_SHARED_IRQ_IDX],
- false, nvme_handle_event, nvme_poll_cb);
+ false, nvme_handle_event, nvme_poll_cb,
+ nvme_poll_ready);
if (!nvme_identify(bs, namespace, errp)) {
ret = -EIO;
@@ -924,7 +938,7 @@ static void nvme_close(BlockDriverState *bs)
g_free(s->queues);
aio_set_event_notifier(bdrv_get_aio_context(bs),
&s->irq_notifier[MSIX_SHARED_IRQ_IDX],
- false, NULL, NULL);
+ false, NULL, NULL, NULL);
event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map,
0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE);
@@ -1520,7 +1534,7 @@ static void nvme_detach_aio_context(BlockDriverState *bs)
aio_set_event_notifier(bdrv_get_aio_context(bs),
&s->irq_notifier[MSIX_SHARED_IRQ_IDX],
- false, NULL, NULL);
+ false, NULL, NULL, NULL);
}
static void nvme_attach_aio_context(BlockDriverState *bs,
@@ -1530,7 +1544,8 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
s->aio_context = new_context;
aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
- false, nvme_handle_event, nvme_poll_cb);
+ false, nvme_handle_event, nvme_poll_cb,
+ nvme_poll_ready);
for (unsigned i = 0; i < s->queue_count; i++) {
NVMeQueuePair *q = s->queues[i];
diff --git a/block/ssh.c b/block/ssh.c
index e0fbb4934b..3b5bf34031 100644
--- a/block/ssh.c
+++ b/block/ssh.c
@@ -990,7 +990,7 @@ static void restart_coroutine(void *opaque)
AioContext *ctx = bdrv_get_aio_context(bs);
trace_ssh_restart_coroutine(restart->co);
- aio_set_fd_handler(ctx, s->sock, false, NULL, NULL, NULL, NULL);
+ aio_set_fd_handler(ctx, s->sock, false, NULL, NULL, NULL, NULL, NULL);
aio_co_wake(restart->co);
}
@@ -1020,7 +1020,7 @@ static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs)
trace_ssh_co_yield(s->sock, rd_handler, wr_handler);
aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
- false, rd_handler, wr_handler, NULL, &restart);
+ false, rd_handler, wr_handler, NULL, NULL, &restart);
qemu_coroutine_yield();
trace_ssh_co_yield_back(s->sock);
}
diff --git a/block/win32-aio.c b/block/win32-aio.c
index b7221a272f..c57e10c997 100644
--- a/block/win32-aio.c
+++ b/block/win32-aio.c
@@ -172,7 +172,7 @@ int win32_aio_attach(QEMUWin32AIOState *aio, HANDLE hfile)
void win32_aio_detach_aio_context(QEMUWin32AIOState *aio,
AioContext *old_context)
{
- aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL);
+ aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL, NULL);
aio->aio_ctx = NULL;
}
@@ -181,7 +181,7 @@ void win32_aio_attach_aio_context(QEMUWin32AIOState *aio,
{
aio->aio_ctx = new_context;
aio_set_event_notifier(new_context, &aio->e, false,
- win32_aio_completion_cb, NULL);
+ win32_aio_completion_cb, NULL, NULL);
}
QEMUWin32AIOState *win32_aio_init(void)
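Each driver above applies the same mechanical transformation: the old poll
callback keeps only its cheap readiness test, the completion processing moves
into a new *_poll_ready() function, and both are passed to
aio_set_fd_handler()/aio_set_event_notifier(). A minimal sketch of the
pattern for a hypothetical fd-based driver (MyDriverState and the mydrv_*
names are illustrative, not part of this series):

    /* Check-only poll callback: cheap and side-effect-free. */
    static bool mydrv_poll_cb(void *opaque)
    {
        MyDriverState *s = opaque;
        return mydrv_completions_pending(s);   /* just report pending work */
    }

    /* Invoked by the event loop only when mydrv_poll_cb() returned true. */
    static void mydrv_poll_ready(void *opaque)
    {
        MyDriverState *s = opaque;
        mydrv_process_completions(s);          /* the actual work */
    }

    /* Registration mirrors the updated call sites above. */
    aio_set_fd_handler(s->aio_context, s->fd, false,
                       mydrv_event_cb, NULL,           /* read handler     */
                       mydrv_poll_cb,                  /* poll: check only */
                       mydrv_poll_ready,               /* poll: do work    */
                       s);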