author     Peter Lieven <pl@kamp.de>               2013-12-14 17:31:40 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>     2013-12-16 11:25:51 +0100
commit     8b9dfe9098d91e06a3dd6376624307fe5fa13be8
tree       9802b09cee15c53db368e5351bc0a9055a45433d
parent     e157b8fdd412d48eacfbb8c67d3d58780154faa3
block/iscsi: use a bh to schedule co reentrance
This fixes a potential segfault and a performance regression.

If the coroutine is reentered directly in iscsi_co_generic_cb,
iscsi_process_{read,write} are interrupted and reentered at an arbitrary
later point. On the one hand this could happen after an iscsi_close where
the iscsi context is already gone (segfault). On the other hand this limits
the number of processed callbacks in each aio_dispatch to one (potential
performance regression).

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Lieven <pl@kamp.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
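For context, iscsi_co_generic_cb() runs from inside libiscsi's service loop.
The sketch below is modeled on the fd read handler in block/iscsi.c of this
period (illustrative only, not a verbatim excerpt and not part of the patch)
and shows why entering the coroutine directly from the callback is fragile:

/* Hedged sketch of the fd read handler (not part of this patch).
 * Completion callbacks such as iscsi_co_generic_cb() fire from within
 * iscsi_service(); if the callback entered the coroutine directly, the
 * coroutine could run arbitrary code, including an iscsi_close() that
 * destroys the iscsi context, before iscsi_service() returns, and the
 * lines below would then touch freed state. Deferring the reentry to a
 * QEMUBH lets this handler finish before the coroutine resumes. */
static void
iscsi_process_read(void *arg)
{
    IscsiLun *iscsilun = arg;
    struct iscsi_context *iscsi = iscsilun->iscsi;

    /* dispatch completed PDUs; completion callbacks run inside this call */
    iscsi_service(iscsi, POLLIN);

    /* re-arm the fd handler for whatever libiscsi now wants to poll */
    iscsi_set_events(iscsilun);
}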
Diffstat (limited to 'block')
 block/iscsi.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/block/iscsi.c b/block/iscsi.c
index fa69408df9..b0e6eea199 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -68,6 +68,7 @@ typedef struct IscsiTask {
     int do_retry;
     struct scsi_task *task;
     Coroutine *co;
+    QEMUBH *bh;
 } IscsiTask;
 
 typedef struct IscsiAIOCB {
@@ -123,6 +124,13 @@ iscsi_schedule_bh(IscsiAIOCB *acb)
     qemu_bh_schedule(acb->bh);
 }
 
+static void iscsi_co_generic_bh_cb(void *opaque)
+{
+    struct IscsiTask *iTask = opaque;
+    qemu_bh_delete(iTask->bh);
+    qemu_coroutine_enter(iTask->co, NULL);
+}
+
 static void
 iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
                     void *command_data, void *opaque)
@@ -147,7 +155,8 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
 out:
     if (iTask->co) {
-        qemu_coroutine_enter(iTask->co, NULL);
+        iTask->bh = qemu_bh_new(iscsi_co_generic_bh_cb, iTask);
+        qemu_bh_schedule(iTask->bh);
     }
 }
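
For completeness, the coroutine side of this handshake in block/iscsi.c of
this period has roughly the following shape (a hedged sketch, not part of
the patch; the iTask.complete flag, iscsi_set_events() and the elided
request setup are assumptions drawn from that era's code, not shown here):

/* Sketch of a request coroutine: it records itself in the IscsiTask,
 * issues the asynchronous SCSI command with iscsi_co_generic_cb as the
 * completion callback, and yields. With this patch the callback only
 * schedules iscsi_co_generic_bh_cb(), and that BH, run from the main
 * loop outside iscsi_service(), is what re-enters the coroutine. */
struct IscsiTask iTask = {
    .co = qemu_coroutine_self(),    /* whom the BH will re-enter */
};

/* ... issue the command via libiscsi, passing iscsi_co_generic_cb ... */

while (!iTask.complete) {           /* set by iscsi_co_generic_cb */
    iscsi_set_events(iscsilun);
    qemu_coroutine_yield();         /* resumed via iscsi_co_generic_bh_cb */
}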