author | Fam Zheng <famz@redhat.com> | 2015-01-20 11:28:47 +0800
committer | Kevin Wolf <kwolf@redhat.com> | 2015-01-23 18:17:05 +0100
commit | 1dc936aa84b300940b2797c391cc3ca519bc78ce (patch)
tree | 5659e24ad49a726dc65ff65c051c48723f2bd81d /hw/block
parent | 75344fa4c5d6a3ebe2dec2d87cbea8524f5d0f42 (diff)
virtio-blk: Use blk_aio_ioctl
Use the asynchronous ioctl interface, so that the VM does not become
unresponsive when the ioctl takes a long time.
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
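To make the pattern concrete, below is a minimal, self-contained sketch (not QEMU code; the Request type and the submit_async()/handle_request()/request_complete() names are made up for illustration) of the submit/complete split this patch applies: the blocking call is replaced by an asynchronous submission that returns -EINPROGRESS, and the request is completed and freed only from the completion callback.

/* Illustrative sketch only; all names here are hypothetical.
 * Only the control flow mirrors the change in this commit. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

typedef struct Request {
    int id;
} Request;

typedef void CompletionFunc(void *opaque, int status);

/* Stand-in for an event loop: remembers one pending completion. */
static CompletionFunc *pending_cb;
static void *pending_opaque;

/* Asynchronous submission: queues the callback instead of blocking.
 * A real implementation would hand the work to a thread pool. */
static void submit_async(Request *req, CompletionFunc *cb, void *opaque)
{
    (void)req;
    pending_cb = cb;
    pending_opaque = opaque;
}

/* Completion callback: per-request state is released only here. */
static void request_complete(void *opaque, int status)
{
    Request *req = opaque;
    printf("request %d completed, status %d\n", req->id, status);
    free(req);
}

/* The caller no longer completes and frees the request itself;
 * it only reports that the operation is still in flight. */
static int handle_request(int id)
{
    Request *req = calloc(1, sizeof(*req));
    req->id = id;
    submit_async(req, request_complete, req);
    return -EINPROGRESS;
}

int main(void)
{
    int status = handle_request(1);
    if (status != -EINPROGRESS) {
        printf("completed synchronously, status %d\n", status);
    }
    /* Later, the event loop delivers the completion. */
    pending_cb(pending_opaque, 0);
    return 0;
}

The same split appears in the diff below: virtio_blk_handle_scsi_req() returns -EINPROGRESS after calling blk_aio_ioctl(), and virtio_blk_ioctl_complete() fills in the virtio-scsi header, completes the request, and frees the per-request state.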
Diffstat (limited to 'hw/block')
-rw-r--r-- | hw/block/virtio-blk.c | 125
1 file changed, 79 insertions, 46 deletions
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 60cb1d88ed..4032fcae27 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -115,6 +115,56 @@ static void virtio_blk_flush_complete(void *opaque, int ret)
     virtio_blk_free_request(req);
 }
 
+#ifdef __linux__
+
+typedef struct {
+    VirtIOBlockReq *req;
+    struct sg_io_hdr hdr;
+} VirtIOBlockIoctlReq;
+
+static void virtio_blk_ioctl_complete(void *opaque, int status)
+{
+    VirtIOBlockIoctlReq *ioctl_req = opaque;
+    VirtIOBlockReq *req = ioctl_req->req;
+    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
+    struct virtio_scsi_inhdr *scsi;
+    struct sg_io_hdr *hdr;
+
+    scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;
+
+    if (status) {
+        status = VIRTIO_BLK_S_UNSUPP;
+        virtio_stl_p(vdev, &scsi->errors, 255);
+        goto out;
+    }
+
+    hdr = &ioctl_req->hdr;
+    /*
+     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
+     * clear the masked_status field [hence status gets cleared too, see
+     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
+     * status has occurred. However they do set DRIVER_SENSE in driver_status
+     * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
+     */
+    if (hdr->status == 0 && hdr->sb_len_wr > 0) {
+        hdr->status = CHECK_CONDITION;
+    }
+
+    virtio_stl_p(vdev, &scsi->errors,
+                 hdr->status | (hdr->msg_status << 8) |
+                 (hdr->host_status << 16) | (hdr->driver_status << 24));
+    virtio_stl_p(vdev, &scsi->residual, hdr->resid);
+    virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr);
+    virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);
+
+out:
+    virtio_blk_req_complete(req, status);
+    virtio_blk_free_request(req);
+    g_free(ioctl_req);
+}
+
+#endif
+
 static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
 {
     VirtIOBlockReq *req = virtio_blk_alloc_request(s);
@@ -137,7 +187,7 @@ static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
 
 #ifdef __linux__
     int i;
-    struct sg_io_hdr hdr;
+    VirtIOBlockIoctlReq *ioctl_req;
 #endif
 
     /*
@@ -172,71 +222,52 @@ static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
     }
 
 #ifdef __linux__
-    memset(&hdr, 0, sizeof(struct sg_io_hdr));
-    hdr.interface_id = 'S';
-    hdr.cmd_len = elem->out_sg[1].iov_len;
-    hdr.cmdp = elem->out_sg[1].iov_base;
-    hdr.dxfer_len = 0;
+    ioctl_req = g_new0(VirtIOBlockIoctlReq, 1);
+    ioctl_req->req = req;
+    ioctl_req->hdr.interface_id = 'S';
+    ioctl_req->hdr.cmd_len = elem->out_sg[1].iov_len;
+    ioctl_req->hdr.cmdp = elem->out_sg[1].iov_base;
+    ioctl_req->hdr.dxfer_len = 0;
 
     if (elem->out_num > 2) {
         /*
          * If there are more than the minimally required 2 output segments
         * there is write payload starting from the third iovec.
         */
-        hdr.dxfer_direction = SG_DXFER_TO_DEV;
-        hdr.iovec_count = elem->out_num - 2;
+        ioctl_req->hdr.dxfer_direction = SG_DXFER_TO_DEV;
+        ioctl_req->hdr.iovec_count = elem->out_num - 2;
 
-        for (i = 0; i < hdr.iovec_count; i++)
-            hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
+        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
+            ioctl_req->hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
+        }
 
-        hdr.dxferp = elem->out_sg + 2;
+        ioctl_req->hdr.dxferp = elem->out_sg + 2;
 
     } else if (elem->in_num > 3) {
         /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
-        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
-        hdr.iovec_count = elem->in_num - 3;
-        for (i = 0; i < hdr.iovec_count; i++)
-            hdr.dxfer_len += elem->in_sg[i].iov_len;
+        ioctl_req->hdr.dxfer_direction = SG_DXFER_FROM_DEV;
+        ioctl_req->hdr.iovec_count = elem->in_num - 3;
+        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
+            ioctl_req->hdr.dxfer_len += elem->in_sg[i].iov_len;
+        }
 
-        hdr.dxferp = elem->in_sg;
+        ioctl_req->hdr.dxferp = elem->in_sg;
     } else {
         /*
         * Some SCSI commands don't actually transfer any data.
         */
-        hdr.dxfer_direction = SG_DXFER_NONE;
+        ioctl_req->hdr.dxfer_direction = SG_DXFER_NONE;
     }
 
-    hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
-    hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;
-
-    status = blk_ioctl(blk->blk, SG_IO, &hdr);
-    if (status) {
-        status = VIRTIO_BLK_S_UNSUPP;
-        goto fail;
-    }
+    ioctl_req->hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
+    ioctl_req->hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;
 
-    /*
-     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
-     * clear the masked_status field [hence status gets cleared too, see
-     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
-     * status has occurred. However they do set DRIVER_SENSE in driver_status
-     * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
-     */
-    if (hdr.status == 0 && hdr.sb_len_wr > 0) {
-        hdr.status = CHECK_CONDITION;
-    }
-
-    virtio_stl_p(vdev, &scsi->errors,
-                 hdr.status | (hdr.msg_status << 8) |
-                 (hdr.host_status << 16) | (hdr.driver_status << 24));
-    virtio_stl_p(vdev, &scsi->residual, hdr.resid);
-    virtio_stl_p(vdev, &scsi->sense_len, hdr.sb_len_wr);
-    virtio_stl_p(vdev, &scsi->data_len, hdr.dxfer_len);
-
-    return status;
+    blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr,
+                  virtio_blk_ioctl_complete, ioctl_req);
+    return -EINPROGRESS;
 #else
     abort();
 #endif
@@ -254,8 +285,10 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
     int status;
 
     status = virtio_blk_handle_scsi_req(req);
-    virtio_blk_req_complete(req, status);
-    virtio_blk_free_request(req);
+    if (status != -EINPROGRESS) {
+        virtio_blk_req_complete(req, status);
+        virtio_blk_free_request(req);
+    }
 }
 
 void virtio_submit_multiwrite(BlockBackend *blk, MultiReqBuffer *mrb)