Diffstat (limited to 'hw/xen_disk.c')
-rw-r--r-- | hw/xen_disk.c | 492
1 files changed, 265 insertions, 227 deletions
diff --git a/hw/xen_disk.c b/hw/xen_disk.c
index 558bf8ae25..2b3a8feac7 100644
--- a/hw/xen_disk.c
+++ b/hw/xen_disk.c
@@ -120,17 +120,18 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
     struct ioreq *ioreq = NULL;
 
     if (QLIST_EMPTY(&blkdev->freelist)) {
-        if (blkdev->requests_total >= max_requests)
-            goto out;
-        /* allocate new struct */
-        ioreq = qemu_mallocz(sizeof(*ioreq));
-        ioreq->blkdev = blkdev;
-        blkdev->requests_total++;
+        if (blkdev->requests_total >= max_requests) {
+            goto out;
+        }
+        /* allocate new struct */
+        ioreq = qemu_mallocz(sizeof(*ioreq));
+        ioreq->blkdev = blkdev;
+        blkdev->requests_total++;
         qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
     } else {
-        /* get one from freelist */
-        ioreq = QLIST_FIRST(&blkdev->freelist);
-        QLIST_REMOVE(ioreq, list);
+        /* get one from freelist */
+        ioreq = QLIST_FIRST(&blkdev->freelist);
+        QLIST_REMOVE(ioreq, list);
         qemu_iovec_reset(&ioreq->v);
     }
     QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
@@ -173,30 +174,32 @@ static int ioreq_parse(struct ioreq *ioreq)
     int i;
 
     xen_be_printf(&blkdev->xendev, 3,
-                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
-                  ioreq->req.operation, ioreq->req.nr_segments,
-                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
+                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
+                  ioreq->req.operation, ioreq->req.nr_segments,
+                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
     switch (ioreq->req.operation) {
     case BLKIF_OP_READ:
-        ioreq->prot = PROT_WRITE; /* to memory */
-        break;
+        ioreq->prot = PROT_WRITE; /* to memory */
+        break;
     case BLKIF_OP_WRITE_BARRIER:
         if (!ioreq->req.nr_segments) {
             ioreq->presync = 1;
             return 0;
         }
-        if (!syncwrite)
-            ioreq->presync = ioreq->postsync = 1;
-        /* fall through */
+        if (!syncwrite) {
+            ioreq->presync = ioreq->postsync = 1;
+        }
+        /* fall through */
     case BLKIF_OP_WRITE:
-        ioreq->prot = PROT_READ; /* from memory */
-        if (syncwrite)
-            ioreq->postsync = 1;
-        break;
+        ioreq->prot = PROT_READ; /* from memory */
+        if (syncwrite) {
+            ioreq->postsync = 1;
+        }
+        break;
     default:
-        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
-                      ioreq->req.operation);
-        goto err;
+        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
+                      ioreq->req.operation);
+        goto err;
     };
 
     if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
@@ -206,29 +209,29 @@ static int ioreq_parse(struct ioreq *ioreq)
 
     ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
     for (i = 0; i < ioreq->req.nr_segments; i++) {
-        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
-            goto err;
-        }
-        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
-            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
-            goto err;
-        }
-        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
-            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
-            goto err;
-        }
-
-        ioreq->domids[i] = blkdev->xendev.dom;
-        ioreq->refs[i] = ioreq->req.seg[i].gref;
-
-        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
-        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
+        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
+            goto err;
+        }
+        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
+            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
+            goto err;
+        }
+        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
+            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
+            goto err;
+        }
+
+        ioreq->domids[i] = blkdev->xendev.dom;
+        ioreq->refs[i] = ioreq->req.seg[i].gref;
+
+        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
+        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
         qemu_iovec_add(&ioreq->v, (void*)mem, len);
     }
     if (ioreq->start + ioreq->v.size > blkdev->file_size) {
-        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
-        goto err;
+        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
+        goto err;
     }
 
     return 0;
@@ -242,26 +245,31 @@ static void ioreq_unmap(struct ioreq *ioreq)
     int gnt = ioreq->blkdev->xendev.gnttabdev;
     int i;
 
-    if (ioreq->v.niov == 0)
+    if (ioreq->v.niov == 0) {
         return;
+    }
     if (batch_maps) {
-        if (!ioreq->pages)
-            return;
-        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0)
-            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
-                          strerror(errno));
-        ioreq->blkdev->cnt_map -= ioreq->v.niov;
-        ioreq->pages = NULL;
+        if (!ioreq->pages) {
+            return;
+        }
+        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0) {
+            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
+                          strerror(errno));
+        }
+        ioreq->blkdev->cnt_map -= ioreq->v.niov;
+        ioreq->pages = NULL;
     } else {
-        for (i = 0; i < ioreq->v.niov; i++) {
-            if (!ioreq->page[i])
-                continue;
-            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0)
-                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
-                              strerror(errno));
-            ioreq->blkdev->cnt_map--;
-            ioreq->page[i] = NULL;
-        }
+        for (i = 0; i < ioreq->v.niov; i++) {
+            if (!ioreq->page[i]) {
+                continue;
+            }
+            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
+                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
+                              strerror(errno));
+            }
+            ioreq->blkdev->cnt_map--;
+            ioreq->page[i] = NULL;
+        }
     }
 }
@@ -270,35 +278,37 @@ static int ioreq_map(struct ioreq *ioreq)
     int gnt = ioreq->blkdev->xendev.gnttabdev;
     int i;
 
-    if (ioreq->v.niov == 0)
+    if (ioreq->v.niov == 0) {
         return 0;
+    }
     if (batch_maps) {
-        ioreq->pages = xc_gnttab_map_grant_refs
-            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
-        if (ioreq->pages == NULL) {
-            xen_be_printf(&ioreq->blkdev->xendev, 0,
-                          "can't map %d grant refs (%s, %d maps)\n",
-                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
-            return -1;
-        }
-        for (i = 0; i < ioreq->v.niov; i++)
-            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
-                (uintptr_t)ioreq->v.iov[i].iov_base;
-        ioreq->blkdev->cnt_map += ioreq->v.niov;
+        ioreq->pages = xc_gnttab_map_grant_refs
+            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
+        if (ioreq->pages == NULL) {
+            xen_be_printf(&ioreq->blkdev->xendev, 0,
+                          "can't map %d grant refs (%s, %d maps)\n",
+                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
+            return -1;
+        }
+        for (i = 0; i < ioreq->v.niov; i++) {
+            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
+                (uintptr_t)ioreq->v.iov[i].iov_base;
+        }
+        ioreq->blkdev->cnt_map += ioreq->v.niov;
     } else {
-        for (i = 0; i < ioreq->v.niov; i++) {
-            ioreq->page[i] = xc_gnttab_map_grant_ref
-                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
-            if (ioreq->page[i] == NULL) {
-                xen_be_printf(&ioreq->blkdev->xendev, 0,
-                              "can't map grant ref %d (%s, %d maps)\n",
-                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
-                ioreq_unmap(ioreq);
-                return -1;
-            }
-            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
-            ioreq->blkdev->cnt_map++;
-        }
+        for (i = 0; i < ioreq->v.niov; i++) {
+            ioreq->page[i] = xc_gnttab_map_grant_ref
+                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
+            if (ioreq->page[i] == NULL) {
+                xen_be_printf(&ioreq->blkdev->xendev, 0,
+                              "can't map grant ref %d (%s, %d maps)\n",
+                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
+                ioreq_unmap(ioreq);
+                return -1;
+            }
+            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
+            ioreq->blkdev->cnt_map++;
+        }
     }
     return 0;
 }
@@ -309,54 +319,58 @@ static int ioreq_runio_qemu_sync(struct ioreq *ioreq)
     int i, rc, len = 0;
     off_t pos;
 
-    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1)
-        goto err_no_map;
-    if (ioreq->presync)
-        bdrv_flush(blkdev->bs);
+    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
+        goto err_no_map;
+    }
+    if (ioreq->presync) {
+        bdrv_flush(blkdev->bs);
+    }
 
     switch (ioreq->req.operation) {
     case BLKIF_OP_READ:
-        pos = ioreq->start;
-        for (i = 0; i < ioreq->v.niov; i++) {
-            rc = bdrv_read(blkdev->bs, pos / BLOCK_SIZE,
-                           ioreq->v.iov[i].iov_base,
-                           ioreq->v.iov[i].iov_len / BLOCK_SIZE);
-            if (rc != 0) {
-                xen_be_printf(&blkdev->xendev, 0, "rd I/O error (%p, len %zd)\n",
-                              ioreq->v.iov[i].iov_base,
-                              ioreq->v.iov[i].iov_len);
-                goto err;
-            }
-            len += ioreq->v.iov[i].iov_len;
-            pos += ioreq->v.iov[i].iov_len;
-        }
-        break;
+        pos = ioreq->start;
+        for (i = 0; i < ioreq->v.niov; i++) {
+            rc = bdrv_read(blkdev->bs, pos / BLOCK_SIZE,
+                           ioreq->v.iov[i].iov_base,
+                           ioreq->v.iov[i].iov_len / BLOCK_SIZE);
+            if (rc != 0) {
+                xen_be_printf(&blkdev->xendev, 0, "rd I/O error (%p, len %zd)\n",
+                              ioreq->v.iov[i].iov_base,
+                              ioreq->v.iov[i].iov_len);
+                goto err;
+            }
+            len += ioreq->v.iov[i].iov_len;
+            pos += ioreq->v.iov[i].iov_len;
+        }
+        break;
     case BLKIF_OP_WRITE:
     case BLKIF_OP_WRITE_BARRIER:
-        if (!ioreq->req.nr_segments)
+        if (!ioreq->req.nr_segments) {
             break;
-        pos = ioreq->start;
-        for (i = 0; i < ioreq->v.niov; i++) {
-            rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE,
-                            ioreq->v.iov[i].iov_base,
-                            ioreq->v.iov[i].iov_len / BLOCK_SIZE);
-            if (rc != 0) {
-                xen_be_printf(&blkdev->xendev, 0, "wr I/O error (%p, len %zd)\n",
-                              ioreq->v.iov[i].iov_base,
-                              ioreq->v.iov[i].iov_len);
-                goto err;
-            }
-            len += ioreq->v.iov[i].iov_len;
-            pos += ioreq->v.iov[i].iov_len;
-        }
-        break;
+        }
+        pos = ioreq->start;
+        for (i = 0; i < ioreq->v.niov; i++) {
+            rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE,
+                            ioreq->v.iov[i].iov_base,
+                            ioreq->v.iov[i].iov_len / BLOCK_SIZE);
+            if (rc != 0) {
+                xen_be_printf(&blkdev->xendev, 0, "wr I/O error (%p, len %zd)\n",
+                              ioreq->v.iov[i].iov_base,
+                              ioreq->v.iov[i].iov_len);
+                goto err;
+            }
+            len += ioreq->v.iov[i].iov_len;
+            pos += ioreq->v.iov[i].iov_len;
+        }
+        break;
     default:
-        /* unknown operation (shouldn't happen -- parse catches this) */
-        goto err;
+        /* unknown operation (shouldn't happen -- parse catches this) */
+        goto err;
     }
 
-    if (ioreq->postsync)
-        bdrv_flush(blkdev->bs);
+    if (ioreq->postsync) {
+        bdrv_flush(blkdev->bs);
+    }
     ioreq->status = BLKIF_RSP_OKAY;
 
     ioreq_unmap(ioreq);
@@ -382,8 +396,9 @@ static void qemu_aio_complete(void *opaque, int ret)
     }
 
     ioreq->aio_inflight--;
-    if (ioreq->aio_inflight > 0)
+    if (ioreq->aio_inflight > 0) {
         return;
+    }
 
     ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
     ioreq_unmap(ioreq);
@@ -395,12 +410,14 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
 
-    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1)
-        goto err_no_map;
+    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
+        goto err_no_map;
+    }
 
     ioreq->aio_inflight++;
-    if (ioreq->presync)
-        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
+    if (ioreq->presync) {
+        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
+    }
 
     switch (ioreq->req.operation) {
     case BLKIF_OP_READ:
@@ -408,23 +425,25 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
         bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
-        break;
+        break;
     case BLKIF_OP_WRITE:
     case BLKIF_OP_WRITE_BARRIER:
-        if (!ioreq->req.nr_segments)
+        if (!ioreq->req.nr_segments) {
             break;
+        }
         ioreq->aio_inflight++;
         bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                         &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                         qemu_aio_complete, ioreq);
-        break;
+        break;
     default:
-        /* unknown operation (shouldn't happen -- parse catches this) */
-        goto err;
+        /* unknown operation (shouldn't happen -- parse catches this) */
+        goto err;
     }
 
-    if (ioreq->postsync)
-        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
+    if (ioreq->postsync) {
+        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
+    }
     qemu_aio_complete(ioreq, 0);
 
     return 0;
@@ -452,36 +471,37 @@ static int blk_send_response_one(struct ioreq *ioreq)
 
     /* Place on the response ring for the relevant domain. */
     switch (blkdev->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
-        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
-        break;
+        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
+        break;
     case BLKIF_PROTOCOL_X86_32:
         dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                 blkdev->rings.x86_32_part.rsp_prod_pvt);
-        break;
+        break;
     case BLKIF_PROTOCOL_X86_64:
         dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                 blkdev->rings.x86_64_part.rsp_prod_pvt);
-        break;
+        break;
     default:
-        dst = NULL;
+        dst = NULL;
     }
     memcpy(dst, &resp, sizeof(resp));
     blkdev->rings.common.rsp_prod_pvt++;
 
     RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
     if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
-        /*
-         * Tail check for pending requests. Allows frontend to avoid
-         * notifications if requests are already in flight (lower
-         * overheads and promotes batching).
-         */
-        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
+        /*
+         * Tail check for pending requests. Allows frontend to avoid
+         * notifications if requests are already in flight (lower
+         * overheads and promotes batching).
+         */
+        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
     } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
-        have_requests = 1;
+        have_requests = 1;
     }
 
-    if (have_requests)
-        blkdev->more_work++;
+    if (have_requests) {
+        blkdev->more_work++;
+    }
     return send_notify;
 }
@@ -493,28 +513,29 @@ static void blk_send_response_all(struct XenBlkDev *blkdev)
 
     while (!QLIST_EMPTY(&blkdev->finished)) {
         ioreq = QLIST_FIRST(&blkdev->finished);
-        send_notify += blk_send_response_one(ioreq);
-        ioreq_release(ioreq);
+        send_notify += blk_send_response_one(ioreq);
+        ioreq_release(ioreq);
+    }
+    if (send_notify) {
+        xen_be_send_notify(&blkdev->xendev);
     }
-    if (send_notify)
-        xen_be_send_notify(&blkdev->xendev);
 }
 
 static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
 {
     switch (blkdev->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
-        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
-               sizeof(ioreq->req));
-        break;
+        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
+               sizeof(ioreq->req));
+        break;
     case BLKIF_PROTOCOL_X86_32:
         blkif_get_x86_32_req(&ioreq->req,
                              RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
-        break;
+        break;
     case BLKIF_PROTOCOL_X86_64:
         blkif_get_x86_64_req(&ioreq->req,
                              RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
-        break;
+        break;
     }
     return 0;
 }
@@ -530,12 +551,14 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
     rp = blkdev->rings.common.sring->req_prod;
     xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
 
-    if (use_aio)
+    if (use_aio) {
         blk_send_response_all(blkdev);
+    }
     while (rc != rp) {
         /* pull request from ring */
-        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc))
+        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
             break;
+        }
         ioreq = ioreq_start(blkdev);
         if (ioreq == NULL) {
             blkdev->more_work++;
@@ -546,8 +569,9 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
 
         /* parse them */
         if (ioreq_parse(ioreq) != 0) {
-            if (blk_send_response_one(ioreq))
+            if (blk_send_response_one(ioreq)) {
                 xen_be_send_notify(&blkdev->xendev);
+            }
             ioreq_release(ioreq);
             continue;
         }
@@ -560,11 +584,13 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
             ioreq_runio_qemu_sync(ioreq);
         }
     }
-    if (!use_aio)
+    if (!use_aio) {
         blk_send_response_all(blkdev);
+    }
 
-    if (blkdev->more_work && blkdev->requests_inflight < max_requests)
+    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
         qemu_bh_schedule(blkdev->bh);
+    }
 }
 
 /* ------------------------------------------------------------- */
@@ -583,8 +609,9 @@ static void blk_alloc(struct XenDevice *xendev)
     QLIST_INIT(&blkdev->finished);
     QLIST_INIT(&blkdev->freelist);
     blkdev->bh = qemu_bh_new(blk_bh, blkdev);
-    if (xen_mode != XEN_EMULATE)
+    if (xen_mode != XEN_EMULATE) {
         batch_maps = 1;
+    }
 }
 
 static int blk_init(struct XenDevice *xendev)
@@ -595,44 +622,50 @@ static int blk_init(struct XenDevice *xendev)
 
     /* read xenstore entries */
     if (blkdev->params == NULL) {
-        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
+        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
         h = strchr(blkdev->params, ':');
-        if (h != NULL) {
-            blkdev->fileproto = blkdev->params;
-            blkdev->filename = h+1;
-            *h = 0;
-        } else {
-            blkdev->fileproto = "<unset>";
-            blkdev->filename = blkdev->params;
-        }
-    }
-    if (blkdev->mode == NULL)
-        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
-    if (blkdev->type == NULL)
-        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
-    if (blkdev->dev == NULL)
-        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
-    if (blkdev->devtype == NULL)
-        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
+        if (h != NULL) {
+            blkdev->fileproto = blkdev->params;
+            blkdev->filename = h+1;
+            *h = 0;
+        } else {
+            blkdev->fileproto = "<unset>";
+            blkdev->filename = blkdev->params;
+        }
+    }
+    if (blkdev->mode == NULL) {
+        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
+    }
+    if (blkdev->type == NULL) {
+        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
+    }
+    if (blkdev->dev == NULL) {
+        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
+    }
+    if (blkdev->devtype == NULL) {
+        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
+    }
 
     /* do we have all we need? */
     if (blkdev->params == NULL ||
-        blkdev->mode == NULL ||
-        blkdev->type == NULL ||
-        blkdev->dev == NULL)
-        return -1;
+        blkdev->mode == NULL ||
+        blkdev->type == NULL ||
+        blkdev->dev == NULL) {
+        return -1;
+    }
 
     /* read-only ? */
     if (strcmp(blkdev->mode, "w") == 0) {
-        qflags = BDRV_O_RDWR;
+        qflags = BDRV_O_RDWR;
     } else {
-        qflags = 0;
-        info |= VDISK_READONLY;
+        qflags = 0;
+        info |= VDISK_READONLY;
     }
 
     /* cdrom ? */
-    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom"))
-        info |= VDISK_CDROM;
+    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
+        info |= VDISK_CDROM;
+    }
 
     /* init qemu block driver */
     index = (blkdev->xendev.dev - 202 * 256) / 16;
@@ -649,7 +682,7 @@ static int blk_init(struct XenDevice *xendev)
     } else {
         /* setup via qemu cmdline -> already setup for us */
         xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
-        blkdev->bs = blkdev->dinfo->bdrv;
+        blkdev->bs = blkdev->dinfo->bdrv;
     }
     blkdev->file_blk = BLOCK_SIZE;
     blkdev->file_size = bdrv_getlength(blkdev->bs);
@@ -657,21 +690,21 @@ static int blk_init(struct XenDevice *xendev)
         xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                       (int)blkdev->file_size, strerror(-blkdev->file_size),
                       blkdev->bs->drv ? blkdev->bs->drv->format_name : "-");
-        blkdev->file_size = 0;
+        blkdev->file_size = 0;
     }
     have_barriers = blkdev->bs->drv && blkdev->bs->drv->bdrv_flush ? 1 : 0;
 
     xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
-                  " size %" PRId64 " (%" PRId64 " MB)\n",
-                  blkdev->type, blkdev->fileproto, blkdev->filename,
-                  blkdev->file_size, blkdev->file_size >> 20);
+                  " size %" PRId64 " (%" PRId64 " MB)\n",
+                  blkdev->type, blkdev->fileproto, blkdev->filename,
+                  blkdev->file_size, blkdev->file_size >> 20);
 
     /* fill info */
     xenstore_write_be_int(&blkdev->xendev, "feature-barrier", have_barriers);
     xenstore_write_be_int(&blkdev->xendev, "info", info);
     xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
     xenstore_write_be_int(&blkdev->xendev, "sectors",
-                          blkdev->file_size / blkdev->file_blk);
+                          blkdev->file_size / blkdev->file_blk);
 
     return 0;
 }
@@ -679,57 +712,62 @@ static int blk_connect(struct XenDevice *xendev)
 {
     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
 
-    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1)
-        return -1;
+    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
+        return -1;
+    }
     if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
-                             &blkdev->xendev.remote_port) == -1)
-        return -1;
+                             &blkdev->xendev.remote_port) == -1) {
+        return -1;
+    }
 
     blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
     if (blkdev->xendev.protocol) {
-        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0)
+        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
             blkdev->protocol = BLKIF_PROTOCOL_X86_32;
-        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0)
+        }
+        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
             blkdev->protocol = BLKIF_PROTOCOL_X86_64;
+        }
     }
 
     blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
-                                            blkdev->xendev.dom,
-                                            blkdev->ring_ref,
-                                            PROT_READ | PROT_WRITE);
-    if (!blkdev->sring)
-        return -1;
+                                            blkdev->xendev.dom,
+                                            blkdev->ring_ref,
+                                            PROT_READ | PROT_WRITE);
+    if (!blkdev->sring) {
+        return -1;
+    }
     blkdev->cnt_map++;
 
     switch (blkdev->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
     {
-        blkif_sring_t *sring_native = blkdev->sring;
-        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
-        break;
+        blkif_sring_t *sring_native = blkdev->sring;
+        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
+        break;
     }
     case BLKIF_PROTOCOL_X86_32:
     {
-        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
+        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
         BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
-        break;
+        break;
     }
     case BLKIF_PROTOCOL_X86_64:
     {
-        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
+        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
         BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
-        break;
+        break;
     }
     }
     xen_be_bind_evtchn(&blkdev->xendev);
 
     xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
-                  "remote port %d, local port %d\n",
-                  blkdev->xendev.protocol, blkdev->ring_ref,
-                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
+                  "remote port %d, local port %d\n",
+                  blkdev->xendev.protocol, blkdev->ring_ref,
+                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
     return 0;
 }
@@ -743,14 +781,14 @@ static void blk_disconnect(struct XenDevice *xendev)
         bdrv_close(blkdev->bs);
         bdrv_delete(blkdev->bs);
     }
-        blkdev->bs = NULL;
+        blkdev->bs = NULL;
     }
     xen_be_unbind_evtchn(&blkdev->xendev);
 
     if (blkdev->sring) {
-        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
-        blkdev->cnt_map--;
-        blkdev->sring = NULL;
+        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
+        blkdev->cnt_map--;
+        blkdev->sring = NULL;
     }
 }
@@ -760,10 +798,10 @@ static int blk_free(struct XenDevice *xendev)
     struct ioreq *ioreq;
 
     while (!QLIST_EMPTY(&blkdev->freelist)) {
-        ioreq = QLIST_FIRST(&blkdev->freelist);
+        ioreq = QLIST_FIRST(&blkdev->freelist);
         QLIST_REMOVE(ioreq, list);
         qemu_iovec_destroy(&ioreq->v);
-        qemu_free(ioreq);
+        qemu_free(ioreq);
     }
 
     qemu_free(blkdev->params);