author:    Paolo Bonzini <pbonzini@redhat.com>  (2011-10-28 18:03:58 +0200)
committer: Kevin Wolf <kwolf@redhat.com>  (2011-12-05 14:51:34 +0100)
commit:    4e5b184d63b5f031800c3ac215eebac126b1ab31
tree:      7d4c5fc43b81da38c233e703b989f47b106d0061
parent:    23e9a39e7db60a8cbe4ac1291d4f4feab08930ce
xen_disk: remove dead code
xen_disk.c has support for using synchronous I/O instead of asynchronous I/O,
but the synchronous path is compiled out by default (use_aio defaults to 1).
Remove it.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
-rw-r--r-- | hw/xen_disk.c | 86 |
1 file changed, 2 insertions(+), 84 deletions(-)
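For orientation before the diff: "synchronous instead of asynchronous" here refers to the block-layer call shape. The removed path issues one blocking bdrv_read()/bdrv_write() call per iovec element, while the kept path submits the whole QEMUIOVector at once and completes later via a callback. A minimal sketch of the two shapes, assuming the block API of this period and reusing the file's struct ioreq fields; the helper names are hypothetical, not from the patch:

```c
/*
 * Illustrative sketch only -- not part of this patch.  Helper names
 * are hypothetical; bdrv_read()/bdrv_aio_readv() match the period's
 * block API, and BLOCK_SIZE (512) comes from hw/xen_disk.c.
 */

/* Synchronous shape (removed): blocks the caller per iovec element. */
static int read_sync_shape(BlockDriverState *bs, struct ioreq *ioreq)
{
    off_t pos = ioreq->start;
    int i;

    for (i = 0; i < ioreq->v.niov; i++) {
        /* Returns only once the data has landed in the buffer. */
        if (bdrv_read(bs, pos / BLOCK_SIZE,
                      ioreq->v.iov[i].iov_base,
                      ioreq->v.iov[i].iov_len / BLOCK_SIZE) != 0) {
            return -1;
        }
        pos += ioreq->v.iov[i].iov_len;
    }
    return 0;
}

/* Asynchronous shape (kept): submit the whole vector at once and
 * finish later in qemu_aio_complete(), driven by the event loop. */
static void read_aio_shape(BlockDriverState *bs, struct ioreq *ioreq)
{
    bdrv_aio_readv(bs, ioreq->start / BLOCK_SIZE,
                   &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                   qemu_aio_complete, ioreq);
}
```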
```diff
diff --git a/hw/xen_disk.c b/hw/xen_disk.c
index 286bbac54a..192e81746f 100644
--- a/hw/xen_disk.c
+++ b/hw/xen_disk.c
@@ -49,7 +49,6 @@
 static int syncwrite = 0;
 static int batch_maps = 0;
 static int max_requests = 32;
-static int use_aio = 1;
 
 /* ------------------------------------------------------------- */
 
@@ -314,76 +313,6 @@ static int ioreq_map(struct ioreq *ioreq)
     return 0;
 }
 
-static int ioreq_runio_qemu_sync(struct ioreq *ioreq)
-{
-    struct XenBlkDev *blkdev = ioreq->blkdev;
-    int i, rc;
-    off_t pos;
-
-    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
-        goto err_no_map;
-    }
-    if (ioreq->presync) {
-        bdrv_flush(blkdev->bs);
-    }
-
-    switch (ioreq->req.operation) {
-    case BLKIF_OP_READ:
-        pos = ioreq->start;
-        for (i = 0; i < ioreq->v.niov; i++) {
-            rc = bdrv_read(blkdev->bs, pos / BLOCK_SIZE,
-                           ioreq->v.iov[i].iov_base,
-                           ioreq->v.iov[i].iov_len / BLOCK_SIZE);
-            if (rc != 0) {
-                xen_be_printf(&blkdev->xendev, 0, "rd I/O error (%p, len %zd)\n",
-                              ioreq->v.iov[i].iov_base,
-                              ioreq->v.iov[i].iov_len);
-                goto err;
-            }
-            pos += ioreq->v.iov[i].iov_len;
-        }
-        break;
-    case BLKIF_OP_WRITE:
-    case BLKIF_OP_WRITE_BARRIER:
-        if (!ioreq->req.nr_segments) {
-            break;
-        }
-        pos = ioreq->start;
-        for (i = 0; i < ioreq->v.niov; i++) {
-            rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE,
-                            ioreq->v.iov[i].iov_base,
-                            ioreq->v.iov[i].iov_len / BLOCK_SIZE);
-            if (rc != 0) {
-                xen_be_printf(&blkdev->xendev, 0, "wr I/O error (%p, len %zd)\n",
-                              ioreq->v.iov[i].iov_base,
-                              ioreq->v.iov[i].iov_len);
-                goto err;
-            }
-            pos += ioreq->v.iov[i].iov_len;
-        }
-        break;
-    default:
-        /* unknown operation (shouldn't happen -- parse catches this) */
-        goto err;
-    }
-
-    if (ioreq->postsync) {
-        bdrv_flush(blkdev->bs);
-    }
-    ioreq->status = BLKIF_RSP_OKAY;
-
-    ioreq_unmap(ioreq);
-    ioreq_finish(ioreq);
-    return 0;
-
-err:
-    ioreq_unmap(ioreq);
-err_no_map:
-    ioreq_finish(ioreq);
-    ioreq->status = BLKIF_RSP_ERROR;
-    return -1;
-}
-
 static void qemu_aio_complete(void *opaque, int ret)
 {
     struct ioreq *ioreq = opaque;
@@ -554,9 +483,7 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
     rp = blkdev->rings.common.sring->req_prod;
     xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
 
-    if (use_aio) {
-        blk_send_response_all(blkdev);
-    }
+    blk_send_response_all(blkdev);
     while (rc != rp) {
         /* pull request from ring */
         if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
@@ -579,16 +506,7 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
             continue;
         }
 
-        if (use_aio) {
-            /* run i/o in aio mode */
-            ioreq_runio_qemu_aio(ioreq);
-        } else {
-            /* run i/o in sync mode */
-            ioreq_runio_qemu_sync(ioreq);
-        }
-    }
-    if (!use_aio) {
-        blk_send_response_all(blkdev);
+        ioreq_runio_qemu_aio(ioreq);
     }
 
     if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
```
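The surviving asynchronous path, ioreq_runio_qemu_aio(), is only called by this diff, never shown. A minimal reconstruction of what it looks like in a tree of this era, anchored on the qemu_aio_complete() callback visible above; the aio_inflight accounting and the exact error labels are assumptions from context, not part of this patch:

```c
/*
 * Sketch of the asynchronous path this patch keeps -- not shown in
 * the diff above.  bdrv_aio_readv()/bdrv_aio_writev() match the
 * period's block API; the aio_inflight field is assumed from context.
 */
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;          /* submission ref, dropped below */
    if (ioreq->presync) {
        bdrv_flush(blkdev->bs);     /* flush is still synchronous here */
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->aio_inflight++;      /* dropped in qemu_aio_complete() */
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;                  /* barrier-only request, no data */
        }
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    /* Drop the submission ref; completions may already have fired. */
    qemu_aio_complete(ioreq, 0);
    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
```

Holding an extra inflight reference during submission and dropping it with a final qemu_aio_complete(ioreq, 0) lets one guest request fan out into several AIO submissions while still being unmapped and answered exactly once, when the count reaches zero.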