Diffstat (limited to 'hw/scsi')
-rw-r--r--  hw/scsi/scsi-bus.c               | 183
-rw-r--r--  hw/scsi/scsi-disk.c              |  67
-rw-r--r--  hw/scsi/scsi-generic.c           |  20
-rw-r--r--  hw/scsi/virtio-scsi-dataplane.c  |   8
-rw-r--r--  hw/scsi/virtio-scsi.c            |  80
5 files changed, 188 insertions, 170 deletions
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index fc4b77fdb0..5b08cbf60a 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -85,6 +85,89 @@ SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int id, int lun)
return d;
}
+/*
+ * Invoke @fn() for each enqueued request in device @s. Must be called from the
+ * main loop thread while the guest is stopped. This is only suitable for
+ * vmstate ->put(), use scsi_device_for_each_req_async() for other cases.
+ */
+static void scsi_device_for_each_req_sync(SCSIDevice *s,
+ void (*fn)(SCSIRequest *, void *),
+ void *opaque)
+{
+ SCSIRequest *req;
+ SCSIRequest *next_req;
+
+ assert(!runstate_is_running());
+ assert(qemu_in_main_thread());
+
+ QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
+ fn(req, opaque);
+ }
+}
+
+typedef struct {
+ SCSIDevice *s;
+ void (*fn)(SCSIRequest *, void *);
+ void *fn_opaque;
+} SCSIDeviceForEachReqAsyncData;
+
+static void scsi_device_for_each_req_async_bh(void *opaque)
+{
+ g_autofree SCSIDeviceForEachReqAsyncData *data = opaque;
+ SCSIDevice *s = data->s;
+ AioContext *ctx;
+ SCSIRequest *req;
+ SCSIRequest *next;
+
+ /*
+ * If the AioContext changed before this BH was called then reschedule into
+ * the new AioContext before accessing ->requests. This can happen when
+ * scsi_device_for_each_req_async() is called and then the AioContext is
+ * changed before BHs are run.
+ */
+ ctx = blk_get_aio_context(s->conf.blk);
+ if (ctx != qemu_get_current_aio_context()) {
+ aio_bh_schedule_oneshot(ctx, scsi_device_for_each_req_async_bh,
+ g_steal_pointer(&data));
+ return;
+ }
+
+ QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
+ data->fn(req, data->fn_opaque);
+ }
+
+ /* Drop the reference taken by scsi_device_for_each_req_async() */
+ object_unref(OBJECT(s));
+}
+
+/*
+ * Schedule @fn() to be invoked for each enqueued request in device @s. @fn()
+ * runs in the AioContext that is executing the request.
+ */
+static void scsi_device_for_each_req_async(SCSIDevice *s,
+ void (*fn)(SCSIRequest *, void *),
+ void *opaque)
+{
+ assert(qemu_in_main_thread());
+
+ SCSIDeviceForEachReqAsyncData *data =
+ g_new(SCSIDeviceForEachReqAsyncData, 1);
+
+ data->s = s;
+ data->fn = fn;
+ data->fn_opaque = opaque;
+
+ /*
+ * Hold a reference to the SCSIDevice until
+ * scsi_device_for_each_req_async_bh() finishes.
+ */
+ object_ref(OBJECT(s));
+
+ aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
+ scsi_device_for_each_req_async_bh,
+ data);
+}
+
static void scsi_device_realize(SCSIDevice *s, Error **errp)
{
SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
@@ -144,20 +227,18 @@ void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
qbus_set_bus_hotplug_handler(BUS(bus));
}
-static void scsi_dma_restart_bh(void *opaque)
+void scsi_req_retry(SCSIRequest *req)
{
- SCSIDevice *s = opaque;
- SCSIRequest *req, *next;
-
- qemu_bh_delete(s->bh);
- s->bh = NULL;
+ req->retry = true;
+}
- aio_context_acquire(blk_get_aio_context(s->conf.blk));
- QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
- scsi_req_ref(req);
- if (req->retry) {
- req->retry = false;
- switch (req->cmd.mode) {
+/* Called in the AioContext that is executing the request */
+static void scsi_dma_restart_req(SCSIRequest *req, void *opaque)
+{
+ scsi_req_ref(req);
+ if (req->retry) {
+ req->retry = false;
+ switch (req->cmd.mode) {
case SCSI_XFER_FROM_DEV:
case SCSI_XFER_TO_DEV:
scsi_req_continue(req);
@@ -166,37 +247,22 @@ static void scsi_dma_restart_bh(void *opaque)
scsi_req_dequeue(req);
scsi_req_enqueue(req);
break;
- }
}
- scsi_req_unref(req);
}
- aio_context_release(blk_get_aio_context(s->conf.blk));
- /* Drop the reference that was acquired in scsi_dma_restart_cb */
- object_unref(OBJECT(s));
-}
-
-void scsi_req_retry(SCSIRequest *req)
-{
- /* No need to save a reference, because scsi_dma_restart_bh just
- * looks at the request list. */
- req->retry = true;
+ scsi_req_unref(req);
}
static void scsi_dma_restart_cb(void *opaque, bool running, RunState state)
{
SCSIDevice *s = opaque;
+ assert(qemu_in_main_thread());
+
if (!running) {
return;
}
- if (!s->bh) {
- AioContext *ctx = blk_get_aio_context(s->conf.blk);
- /* The reference is dropped in scsi_dma_restart_bh.*/
- object_ref(OBJECT(s));
- s->bh = aio_bh_new_guarded(ctx, scsi_dma_restart_bh, s,
- &DEVICE(s)->mem_reentrancy_guard);
- qemu_bh_schedule(s->bh);
- }
+
+ scsi_device_for_each_req_async(s, scsi_dma_restart_req, NULL);
}
static bool scsi_bus_is_address_free(SCSIBus *bus,
@@ -1657,17 +1723,16 @@ void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
}
}
+static void scsi_device_purge_one_req(SCSIRequest *req, void *opaque)
+{
+ scsi_req_cancel_async(req, NULL);
+}
+
void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
{
- SCSIRequest *req;
+ scsi_device_for_each_req_async(sdev, scsi_device_purge_one_req, NULL);
- aio_context_acquire(blk_get_aio_context(sdev->conf.blk));
- while (!QTAILQ_EMPTY(&sdev->requests)) {
- req = QTAILQ_FIRST(&sdev->requests);
- scsi_req_cancel_async(req, NULL);
- }
blk_drain(sdev->conf.blk);
- aio_context_release(blk_get_aio_context(sdev->conf.blk));
scsi_device_set_ua(sdev, sense);
}
@@ -1737,31 +1802,33 @@ static char *scsibus_get_fw_dev_path(DeviceState *dev)
/* SCSI request list. For simplicity, pv points to the whole device */
+static void put_scsi_req(SCSIRequest *req, void *opaque)
+{
+ QEMUFile *f = opaque;
+
+ assert(!req->io_canceled);
+ assert(req->status == -1 && req->host_status == -1);
+ assert(req->enqueued);
+
+ qemu_put_sbyte(f, req->retry ? 1 : 2);
+ qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
+ qemu_put_be32s(f, &req->tag);
+ qemu_put_be32s(f, &req->lun);
+ if (req->bus->info->save_request) {
+ req->bus->info->save_request(f, req);
+ }
+ if (req->ops->save_request) {
+ req->ops->save_request(f, req);
+ }
+}
+
static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
const VMStateField *field, JSONWriter *vmdesc)
{
SCSIDevice *s = pv;
- SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
- SCSIRequest *req;
- QTAILQ_FOREACH(req, &s->requests, next) {
- assert(!req->io_canceled);
- assert(req->status == -1 && req->host_status == -1);
- assert(req->enqueued);
-
- qemu_put_sbyte(f, req->retry ? 1 : 2);
- qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
- qemu_put_be32s(f, &req->tag);
- qemu_put_be32s(f, &req->lun);
- if (bus->info->save_request) {
- bus->info->save_request(f, req);
- }
- if (req->ops->save_request) {
- req->ops->save_request(f, req);
- }
- }
+ scsi_device_for_each_req_sync(s, put_scsi_req, f);
qemu_put_sbyte(f, 0);
-
return 0;
}
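
The scsi-bus.c hunks above introduce scsi_device_for_each_req_sync()/async() so that callers never walk s->requests from outside the AioContext that executes the requests. Below is a minimal sketch (not part of the patch) of the intended calling pattern, using only helpers shown above; example_cancel_one() and example_purge() are hypothetical names modelled on scsi_device_purge_requests().

/*
 * Sketch only: how a caller uses scsi_device_for_each_req_async(). The
 * callback runs in the AioContext that is executing the device's requests,
 * never directly in the caller's thread.
 */
static void example_cancel_one(SCSIRequest *req, void *opaque)
{
    scsi_req_cancel_async(req, NULL);
}

static void example_purge(SCSIDevice *sdev)
{
    /* Must be called from the main loop thread */
    scsi_device_for_each_req_async(sdev, example_cancel_one, NULL);

    /* blk_drain() waits for the scheduled BH and the cancelled requests */
    blk_drain(sdev->conf.blk);
}
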
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index 6691f5edb8..2e7e1e9a1c 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -273,7 +273,9 @@ static void scsi_aio_complete(void *opaque, int ret)
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
+ /* The request must only run in the BlockBackend's AioContext */
+ assert(blk_get_aio_context(s->qdev.conf.blk) ==
+ qemu_get_current_aio_context());
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
@@ -286,7 +288,6 @@ static void scsi_aio_complete(void *opaque, int ret)
scsi_req_complete(&r->req, GOOD);
done:
- aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
scsi_req_unref(&r->req);
}
@@ -354,7 +355,6 @@ done:
scsi_req_unref(&r->req);
}
-/* Called with AioContext lock held */
static void scsi_dma_complete(void *opaque, int ret)
{
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
@@ -373,8 +373,13 @@ static void scsi_dma_complete(void *opaque, int ret)
static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
+ SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
uint32_t n;
+ /* The request must only run in the BlockBackend's AioContext */
+ assert(blk_get_aio_context(s->qdev.conf.blk) ==
+ qemu_get_current_aio_context());
+
assert(r->req.aiocb == NULL);
if (scsi_disk_req_check_error(r, ret, false)) {
goto done;
@@ -394,8 +399,6 @@ static void scsi_read_complete(void *opaque, int ret)
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
-
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
@@ -406,7 +409,6 @@ static void scsi_read_complete(void *opaque, int ret)
trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
}
scsi_read_complete_noio(r, ret);
- aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
/* Actually issue a read to the block device. */
@@ -448,8 +450,6 @@ static void scsi_do_read_cb(void *opaque, int ret)
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
-
assert (r->req.aiocb != NULL);
r->req.aiocb = NULL;
@@ -459,7 +459,6 @@ static void scsi_do_read_cb(void *opaque, int ret)
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
}
scsi_do_read(opaque, ret);
- aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
/* Read more data from scsi device into buffer. */
@@ -505,8 +504,13 @@ static void scsi_read_data(SCSIRequest *req)
static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
+ SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
uint32_t n;
+ /* The request must only run in the BlockBackend's AioContext */
+ assert(blk_get_aio_context(s->qdev.conf.blk) ==
+ qemu_get_current_aio_context());
+
assert (r->req.aiocb == NULL);
if (scsi_disk_req_check_error(r, ret, false)) {
goto done;
@@ -533,8 +537,6 @@ static void scsi_write_complete(void * opaque, int ret)
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
-
assert (r->req.aiocb != NULL);
r->req.aiocb = NULL;
@@ -544,7 +546,6 @@ static void scsi_write_complete(void * opaque, int ret)
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
}
scsi_write_complete_noio(r, ret);
- aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
static void scsi_write_data(SCSIRequest *req)
@@ -1742,8 +1743,6 @@ static void scsi_unmap_complete(void *opaque, int ret)
SCSIDiskReq *r = data->r;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
-
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
@@ -1754,7 +1753,6 @@ static void scsi_unmap_complete(void *opaque, int ret)
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
scsi_unmap_complete_noio(data, ret);
}
- aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
@@ -1822,8 +1820,6 @@ static void scsi_write_same_complete(void *opaque, int ret)
SCSIDiskReq *r = data->r;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
-
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
@@ -1847,7 +1843,6 @@ static void scsi_write_same_complete(void *opaque, int ret)
data->sector << BDRV_SECTOR_BITS,
&data->qiov, 0,
scsi_write_same_complete, data);
- aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
return;
}
@@ -1857,7 +1852,6 @@ done:
scsi_req_unref(&r->req);
qemu_vfree(data->iov.iov_base);
g_free(data);
- aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
@@ -2344,14 +2338,10 @@ static void scsi_disk_reset(DeviceState *dev)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
uint64_t nb_sectors;
- AioContext *ctx;
scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
- ctx = blk_get_aio_context(s->qdev.conf.blk);
- aio_context_acquire(ctx);
blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
- aio_context_release(ctx);
nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
if (nb_sectors) {
@@ -2550,15 +2540,13 @@ static void scsi_unrealize(SCSIDevice *dev)
static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
- AioContext *ctx = NULL;
+
/* can happen for devices without drive. The error message for missing
* backend will be issued in scsi_realize
*/
if (s->qdev.conf.blk) {
- ctx = blk_get_aio_context(s->qdev.conf.blk);
- aio_context_acquire(ctx);
if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
- goto out;
+ return;
}
}
s->qdev.blocksize = s->qdev.conf.logical_block_size;
@@ -2567,16 +2555,11 @@ static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
s->product = g_strdup("QEMU HARDDISK");
}
scsi_realize(&s->qdev, errp);
-out:
- if (ctx) {
- aio_context_release(ctx);
- }
}
static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
- AioContext *ctx;
int ret;
uint32_t blocksize = 2048;
@@ -2592,8 +2575,6 @@ static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
blocksize = dev->conf.physical_block_size;
}
- ctx = blk_get_aio_context(dev->conf.blk);
- aio_context_acquire(ctx);
s->qdev.blocksize = blocksize;
s->qdev.type = TYPE_ROM;
s->features |= 1 << SCSI_DISK_F_REMOVABLE;
@@ -2601,7 +2582,6 @@ static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
s->product = g_strdup("QEMU CD-ROM");
}
scsi_realize(&s->qdev, errp);
- aio_context_release(ctx);
}
@@ -2732,7 +2712,6 @@ static int get_device_type(SCSIDiskState *s)
static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
- AioContext *ctx;
int sg_version;
int rc;
@@ -2747,9 +2726,6 @@ static void scsi_block_realize(SCSIDevice *dev, Error **errp)
"be removed in a future version");
}
- ctx = blk_get_aio_context(s->qdev.conf.blk);
- aio_context_acquire(ctx);
-
/* check we are using a driver managing SG_IO (version 3 and after) */
rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
if (rc < 0) {
@@ -2757,18 +2733,18 @@ static void scsi_block_realize(SCSIDevice *dev, Error **errp)
if (rc != -EPERM) {
error_append_hint(errp, "Is this a SCSI device?\n");
}
- goto out;
+ return;
}
if (sg_version < 30000) {
error_setg(errp, "scsi generic interface too old");
- goto out;
+ return;
}
/* get device type from INQUIRY data */
rc = get_device_type(s);
if (rc < 0) {
error_setg(errp, "INQUIRY failed");
- goto out;
+ return;
}
/* Make a guess for the block size, we'll fix it when the guest sends.
@@ -2788,9 +2764,6 @@ static void scsi_block_realize(SCSIDevice *dev, Error **errp)
scsi_realize(&s->qdev, errp);
scsi_generic_read_device_inquiry(&s->qdev);
-
-out:
- aio_context_release(ctx);
}
typedef struct SCSIBlockReq {
@@ -2810,7 +2783,6 @@ static void scsi_block_sgio_complete(void *opaque, int ret)
{
SCSIBlockReq *req = (SCSIBlockReq *)opaque;
SCSIDiskReq *r = &req->req;
- SCSIDevice *s = r->req.dev;
sg_io_hdr_t *io_hdr = &req->io_header;
if (ret == 0) {
@@ -2827,13 +2799,10 @@ static void scsi_block_sgio_complete(void *opaque, int ret)
}
if (ret > 0) {
- aio_context_acquire(blk_get_aio_context(s->conf.blk));
if (scsi_handle_rw_error(r, ret, true)) {
- aio_context_release(blk_get_aio_context(s->conf.blk));
scsi_req_unref(&r->req);
return;
}
- aio_context_release(blk_get_aio_context(s->conf.blk));
/* Ignore error. */
ret = 0;
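
Throughout scsi-disk.c the patch drops aio_context_acquire()/release() pairs from AIO completion callbacks and instead asserts that the callback already runs in the BlockBackend's AioContext. A minimal sketch (not part of the patch) of the resulting callback shape, assuming the request was submitted from that same AioContext; example_aio_complete() is a hypothetical name modelled on scsi_aio_complete() above.

/*
 * Sketch only: no AioContext lock is taken in the completion path; the
 * callback asserts it is already in the BlockBackend's AioContext.
 */
static void example_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    /* The request must only run in the BlockBackend's AioContext */
    assert(blk_get_aio_context(s->qdev.conf.blk) ==
           qemu_get_current_aio_context());

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        /* error handling elided; see scsi_aio_complete() above */
    }

    scsi_req_unref(&r->req);
}
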
diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c
index 2417f0ad84..b7b04e1d63 100644
--- a/hw/scsi/scsi-generic.c
+++ b/hw/scsi/scsi-generic.c
@@ -109,15 +109,11 @@ done:
static void scsi_command_complete(void *opaque, int ret)
{
SCSIGenericReq *r = (SCSIGenericReq *)opaque;
- SCSIDevice *s = r->req.dev;
-
- aio_context_acquire(blk_get_aio_context(s->conf.blk));
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
scsi_command_complete_noio(r, ret);
- aio_context_release(blk_get_aio_context(s->conf.blk));
}
static int execute_command(BlockBackend *blk,
@@ -274,14 +270,12 @@ static void scsi_read_complete(void * opaque, int ret)
SCSIDevice *s = r->req.dev;
int len;
- aio_context_acquire(blk_get_aio_context(s->conf.blk));
-
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
if (ret || r->req.io_canceled) {
scsi_command_complete_noio(r, ret);
- goto done;
+ return;
}
len = r->io_header.dxfer_len - r->io_header.resid;
@@ -320,7 +314,7 @@ static void scsi_read_complete(void * opaque, int ret)
r->io_header.status != GOOD ||
len == 0) {
scsi_command_complete_noio(r, 0);
- goto done;
+ return;
}
/* Snoop READ CAPACITY output to set the blocksize. */
@@ -356,9 +350,6 @@ static void scsi_read_complete(void * opaque, int ret)
req_complete:
scsi_req_data(&r->req, len);
scsi_req_unref(&r->req);
-
-done:
- aio_context_release(blk_get_aio_context(s->conf.blk));
}
/* Read more data from scsi device into buffer. */
@@ -391,14 +382,12 @@ static void scsi_write_complete(void * opaque, int ret)
trace_scsi_generic_write_complete(ret);
- aio_context_acquire(blk_get_aio_context(s->conf.blk));
-
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
if (ret || r->req.io_canceled) {
scsi_command_complete_noio(r, ret);
- goto done;
+ return;
}
if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
@@ -408,9 +397,6 @@ static void scsi_write_complete(void * opaque, int ret)
}
scsi_command_complete_noio(r, ret);
-
-done:
- aio_context_release(blk_get_aio_context(s->conf.blk));
}
/* Write data to a scsi device. Returns nonzero on failure.
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 1e684beebe..135e23fe54 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -149,23 +149,17 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev)
memory_region_transaction_commit();
- /*
- * These fields are visible to the IOThread so we rely on implicit barriers
- * in aio_context_acquire() on the write side and aio_notify_accept() on
- * the read side.
- */
s->dataplane_starting = false;
s->dataplane_started = true;
+ smp_wmb(); /* paired with aio_notify_accept() */
if (s->bus.drain_count == 0) {
- aio_context_acquire(s->ctx);
virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx);
virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx);
for (i = 0; i < vs->conf.num_queues; i++) {
virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx);
}
- aio_context_release(s->ctx);
}
return 0;
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 9c751bf296..ca365a70e9 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -123,6 +123,30 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
virtio_scsi_free_req(req);
}
+static void virtio_scsi_complete_req_bh(void *opaque)
+{
+ VirtIOSCSIReq *req = opaque;
+
+ virtio_scsi_complete_req(req);
+}
+
+/*
+ * Called from virtio_scsi_do_one_tmf_bh() in main loop thread. The main loop
+ * thread cannot touch the virtqueue since that could race with an IOThread.
+ */
+static void virtio_scsi_complete_req_from_main_loop(VirtIOSCSIReq *req)
+{
+ VirtIOSCSI *s = req->dev;
+
+ if (!s->ctx || s->ctx == qemu_get_aio_context()) {
+ /* No need to schedule a BH when there is no IOThread */
+ virtio_scsi_complete_req(req);
+ } else {
+ /* Run request completion in the IOThread */
+ aio_wait_bh_oneshot(s->ctx, virtio_scsi_complete_req_bh, req);
+ }
+}
+
static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
{
virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
@@ -338,10 +362,7 @@ static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req)
out:
object_unref(OBJECT(d));
-
- virtio_scsi_acquire(s);
- virtio_scsi_complete_req(req);
- virtio_scsi_release(s);
+ virtio_scsi_complete_req_from_main_loop(req);
}
/* Some TMFs must be processed from the main loop thread */
@@ -354,18 +375,16 @@ static void virtio_scsi_do_tmf_bh(void *opaque)
GLOBAL_STATE_CODE();
- virtio_scsi_acquire(s);
+ WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
+ QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
+ QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
+ QTAILQ_INSERT_TAIL(&reqs, req, next);
+ }
- QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
- QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
- QTAILQ_INSERT_TAIL(&reqs, req, next);
+ qemu_bh_delete(s->tmf_bh);
+ s->tmf_bh = NULL;
}
- qemu_bh_delete(s->tmf_bh);
- s->tmf_bh = NULL;
-
- virtio_scsi_release(s);
-
QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) {
QTAILQ_REMOVE(&reqs, req, next);
virtio_scsi_do_one_tmf_bh(req);
@@ -379,8 +398,7 @@ static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
GLOBAL_STATE_CODE();
- virtio_scsi_acquire(s);
-
+ /* Called after ioeventfd has been stopped, so tmf_bh_lock is not needed */
if (s->tmf_bh) {
qemu_bh_delete(s->tmf_bh);
s->tmf_bh = NULL;
@@ -393,19 +411,19 @@ static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE;
virtio_scsi_complete_req(req);
}
-
- virtio_scsi_release(s);
}
static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req)
{
VirtIOSCSI *s = req->dev;
- QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next);
+ WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
+ QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next);
- if (!s->tmf_bh) {
- s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s);
- qemu_bh_schedule(s->tmf_bh);
+ if (!s->tmf_bh) {
+ s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s);
+ qemu_bh_schedule(s->tmf_bh);
+ }
}
}
@@ -624,9 +642,7 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
return;
}
- virtio_scsi_acquire(s);
virtio_scsi_handle_ctrl_vq(s, vq);
- virtio_scsi_release(s);
}
static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
@@ -864,9 +880,7 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
return;
}
- virtio_scsi_acquire(s);
virtio_scsi_handle_cmd_vq(s, vq);
- virtio_scsi_release(s);
}
static void virtio_scsi_get_config(VirtIODevice *vdev,
@@ -1013,9 +1027,7 @@ static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
return;
}
- virtio_scsi_acquire(s);
virtio_scsi_handle_event_vq(s, vq);
- virtio_scsi_release(s);
}
static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
@@ -1034,9 +1046,7 @@ static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
},
};
- virtio_scsi_acquire(s);
virtio_scsi_push_event(s, &info);
- virtio_scsi_release(s);
}
}
@@ -1053,17 +1063,13 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
SCSIDevice *sd = SCSI_DEVICE(dev);
- AioContext *old_context;
int ret;
if (s->ctx && !s->dataplane_fenced) {
if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
return;
}
- old_context = blk_get_aio_context(sd->conf.blk);
- aio_context_acquire(old_context);
ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
- aio_context_release(old_context);
if (ret < 0) {
return;
}
@@ -1079,10 +1085,8 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
},
};
- virtio_scsi_acquire(s);
virtio_scsi_push_event(s, &info);
scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
- virtio_scsi_release(s);
}
}
@@ -1104,17 +1108,13 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
if (s->ctx) {
- virtio_scsi_acquire(s);
/* If other users keep the BlockBackend in the iothread, that's ok */
blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
- virtio_scsi_release(s);
}
if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
- virtio_scsi_acquire(s);
virtio_scsi_push_event(s, &info);
scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
- virtio_scsi_release(s);
}
}
@@ -1235,6 +1235,7 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
Error *err = NULL;
QTAILQ_INIT(&s->tmf_bh_list);
+ qemu_mutex_init(&s->tmf_bh_lock);
virtio_scsi_common_realize(dev,
virtio_scsi_handle_ctrl,
@@ -1277,6 +1278,7 @@ static void virtio_scsi_device_unrealize(DeviceState *dev)
qbus_set_hotplug_handler(BUS(&s->bus), NULL);
virtio_scsi_common_unrealize(dev);
+ qemu_mutex_destroy(&s->tmf_bh_lock);
}
static Property virtio_scsi_properties[] = {
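
The virtio-scsi.c hunks replace the AioContext lock (virtio_scsi_acquire()/release()) with a dedicated tmf_bh_lock that protects only tmf_bh_list and tmf_bh. Producers append under the lock and schedule a main-loop BH; the BH detaches the whole list under the lock and completes the requests without holding it. The sketch below (not part of the patch) restates that hand-off; example_tmf_bh() is a hypothetical name mirroring virtio_scsi_do_tmf_bh() above.

/*
 * Sketch only: only the list and the BH pointer are touched under
 * tmf_bh_lock; the requests themselves are processed after the lock is
 * dropped, so completion code cannot deadlock against a producer.
 */
static void example_tmf_bh(void *opaque)            /* main loop thread */
{
    VirtIOSCSI *s = opaque;
    VirtIOSCSIReq *req, *tmp;
    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);

    WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
        QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
            QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
            QTAILQ_INSERT_TAIL(&reqs, req, next);
        }
        qemu_bh_delete(s->tmf_bh);
        s->tmf_bh = NULL;
    }

    QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) {
        QTAILQ_REMOVE(&reqs, req, next);
        virtio_scsi_do_one_tmf_bh(req);   /* may complete via IOThread BH */
    }
}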