author | Frediano Ziglio <freddy77@gmail.com> | 2011-08-23 15:21:11 +0200
---|---|---
committer | Kevin Wolf <kwolf@redhat.com> | 2011-08-23 17:41:14 +0200
commit | 27deebe83657599ae834bb455709b2a6e7c57f82 |
tree | 8903de998a9a990165a1a72263ff492d62f0c3ab /block |
parent | 43ca85b559795fd0f246ffb1db8e09c0da920dd0 |
qcow: Remove QCowAIOCB
Embed qcow_aio_read_cb into qcow_co_readv and qcow_aio_write_cb into qcow_co_writev
Signed-off-by: Frediano Ziglio <freddy77@gmail.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/qcow.c | 291 |
1 file changed, 123 insertions, 168 deletions
```diff
diff --git a/block/qcow.c b/block/qcow.c
index 8c559e2a30..4495afea60 100644
--- a/block/qcow.c
+++ b/block/qcow.c
@@ -488,223 +488,178 @@ static int qcow_read(BlockDriverState *bs, int64_t sector_num,
 }
 #endif
 
-typedef struct QCowAIOCB {
-    BlockDriverState *bs;
-    int64_t sector_num;
-    QEMUIOVector *qiov;
-    uint8_t *buf;
-    void *orig_buf;
-    int nb_sectors;
-} QCowAIOCB;
-
-static QCowAIOCB *qcow_aio_setup(BlockDriverState *bs,
-        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
-        int is_write, QCowAIOCB *acb)
-{
-    acb->bs = bs;
-    acb->sector_num = sector_num;
-    acb->qiov = qiov;
-
-    if (qiov->niov > 1) {
-        acb->buf = acb->orig_buf = qemu_blockalign(bs, qiov->size);
-        if (is_write)
-            qemu_iovec_to_buffer(qiov, acb->buf);
-    } else {
-        acb->orig_buf = NULL;
-        acb->buf = (uint8_t *)qiov->iov->iov_base;
-    }
-    acb->nb_sectors = nb_sectors;
-    return acb;
-}
-
-static int qcow_aio_read_cb(QCowAIOCB *acb)
+static int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
+                         int nb_sectors, QEMUIOVector *qiov)
 {
-    BlockDriverState *bs = acb->bs;
     BDRVQcowState *s = bs->opaque;
     int index_in_cluster;
-    int ret, n;
+    int ret = 0, n;
     uint64_t cluster_offset;
     struct iovec hd_iov;
     QEMUIOVector hd_qiov;
+    uint8_t *buf;
+    void *orig_buf;
 
- redo:
-    if (acb->nb_sectors == 0) {
-        /* request completed */
-        return 0;
+    if (qiov->niov > 1) {
+        buf = orig_buf = qemu_blockalign(bs, qiov->size);
+    } else {
+        orig_buf = NULL;
+        buf = (uint8_t *)qiov->iov->iov_base;
     }
 
-    /* prepare next request */
-    cluster_offset = get_cluster_offset(bs, acb->sector_num << 9,
-                                             0, 0, 0, 0);
-    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
-    n = s->cluster_sectors - index_in_cluster;
-    if (n > acb->nb_sectors) {
-        n = acb->nb_sectors;
-    }
+    qemu_co_mutex_lock(&s->lock);
+
+    while (nb_sectors != 0) {
+        /* prepare next request */
+        cluster_offset = get_cluster_offset(bs, sector_num << 9,
+                                                 0, 0, 0, 0);
+        index_in_cluster = sector_num & (s->cluster_sectors - 1);
+        n = s->cluster_sectors - index_in_cluster;
+        if (n > nb_sectors) {
+            n = nb_sectors;
+        }
 
-    if (!cluster_offset) {
-        if (bs->backing_hd) {
-            /* read from the base image */
-            hd_iov.iov_base = (void *)acb->buf;
+        if (!cluster_offset) {
+            if (bs->backing_hd) {
+                /* read from the base image */
+                hd_iov.iov_base = (void *)buf;
+                hd_iov.iov_len = n * 512;
+                qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
+                qemu_co_mutex_unlock(&s->lock);
+                ret = bdrv_co_readv(bs->backing_hd, sector_num,
+                                    n, &hd_qiov);
+                qemu_co_mutex_lock(&s->lock);
+                if (ret < 0) {
+                    goto fail;
+                }
+            } else {
+                /* Note: in this case, no need to wait */
+                memset(buf, 0, 512 * n);
+            }
+        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
+            /* add AIO support for compressed blocks ? */
+            if (decompress_cluster(bs, cluster_offset) < 0) {
+                goto fail;
+            }
+            memcpy(buf,
+                   s->cluster_cache + index_in_cluster * 512, 512 * n);
+        } else {
+            if ((cluster_offset & 511) != 0) {
+                goto fail;
+            }
+            hd_iov.iov_base = (void *)buf;
             hd_iov.iov_len = n * 512;
             qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
             qemu_co_mutex_unlock(&s->lock);
-            ret = bdrv_co_readv(bs->backing_hd, acb->sector_num,
+            ret = bdrv_co_readv(bs->file,
+                                (cluster_offset >> 9) + index_in_cluster,
                                 n, &hd_qiov);
             qemu_co_mutex_lock(&s->lock);
             if (ret < 0) {
-                return -EIO;
+                break;
+            }
+            if (s->crypt_method) {
+                encrypt_sectors(s, sector_num, buf, buf,
+                                n, 0,
+                                &s->aes_decrypt_key);
             }
-        } else {
-            /* Note: in this case, no need to wait */
-            memset(acb->buf, 0, 512 * n);
-        }
-    } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
-        /* add AIO support for compressed blocks ? */
-        if (decompress_cluster(bs, cluster_offset) < 0) {
-            return -EIO;
-        }
-        memcpy(acb->buf,
-               s->cluster_cache + index_in_cluster * 512, 512 * n);
-    } else {
-        if ((cluster_offset & 511) != 0) {
-            return -EIO;
-        }
-        hd_iov.iov_base = (void *)acb->buf;
-        hd_iov.iov_len = n * 512;
-        qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
-        qemu_co_mutex_unlock(&s->lock);
-        ret = bdrv_co_readv(bs->file,
-                            (cluster_offset >> 9) + index_in_cluster,
-                            n, &hd_qiov);
-        qemu_co_mutex_lock(&s->lock);
-        if (ret < 0) {
-            return ret;
         }
-    }
+        ret = 0;
 
-    /* post process the read buffer */
-    if (!cluster_offset) {
-        /* nothing to do */
-    } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
-        /* nothing to do */
-    } else {
-        if (s->crypt_method) {
-            encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
-                            n, 0,
-                            &s->aes_decrypt_key);
-        }
+        nb_sectors -= n;
+        sector_num += n;
+        buf += n * 512;
     }
 
-    acb->nb_sectors -= n;
-    acb->sector_num += n;
-    acb->buf += n * 512;
-
-    goto redo;
-}
-
-static int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
-                         int nb_sectors, QEMUIOVector *qiov)
-{
-    BDRVQcowState *s = bs->opaque;
-    QCowAIOCB acb;
-    int ret;
-
-    qcow_aio_setup(bs, sector_num, qiov, nb_sectors, 0, &acb);
-
-    qemu_co_mutex_lock(&s->lock);
-    ret = qcow_aio_read_cb(&acb);
+done:
     qemu_co_mutex_unlock(&s->lock);
 
-    if (acb.qiov->niov > 1) {
-        qemu_iovec_from_buffer(acb.qiov, acb.orig_buf, acb.qiov->size);
-        qemu_vfree(acb.orig_buf);
+    if (qiov->niov > 1) {
+        qemu_iovec_from_buffer(qiov, orig_buf, qiov->size);
+        qemu_vfree(orig_buf);
    }
 
     return ret;
+
+fail:
+    ret = -EIO;
+    goto done;
 }
 
-static int qcow_aio_write_cb(QCowAIOCB *acb)
+static int qcow_co_writev(BlockDriverState *bs, int64_t sector_num,
+                          int nb_sectors, QEMUIOVector *qiov)
 {
-    BlockDriverState *bs = acb->bs;
     BDRVQcowState *s = bs->opaque;
     int index_in_cluster;
     uint64_t cluster_offset;
     const uint8_t *src_buf;
-    int ret, n;
+    int ret = 0, n;
     uint8_t *cluster_data = NULL;
     struct iovec hd_iov;
     QEMUIOVector hd_qiov;
+    uint8_t *buf;
+    void *orig_buf;
 
-redo:
-    if (acb->nb_sectors == 0) {
-        /* request completed */
-        return 0;
-    }
+    s->cluster_cache_offset = -1; /* disable compressed cache */
 
-    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
-    n = s->cluster_sectors - index_in_cluster;
-    if (n > acb->nb_sectors) {
-        n = acb->nb_sectors;
-    }
-    cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, 1, 0,
-                                        index_in_cluster,
-                                        index_in_cluster + n);
-    if (!cluster_offset || (cluster_offset & 511) != 0) {
-        return -EIO;
-    }
-    if (s->crypt_method) {
-        if (!cluster_data) {
-            cluster_data = g_malloc0(s->cluster_size);
-        }
-        encrypt_sectors(s, acb->sector_num, cluster_data, acb->buf,
-                        n, 1, &s->aes_encrypt_key);
-        src_buf = cluster_data;
+    if (qiov->niov > 1) {
+        buf = orig_buf = qemu_blockalign(bs, qiov->size);
+        qemu_iovec_to_buffer(qiov, buf);
     } else {
-        src_buf = acb->buf;
+        orig_buf = NULL;
+        buf = (uint8_t *)qiov->iov->iov_base;
     }
 
-    hd_iov.iov_base = (void *)src_buf;
-    hd_iov.iov_len = n * 512;
-    qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
-    qemu_co_mutex_unlock(&s->lock);
-    ret = bdrv_co_writev(bs->file,
-                         (cluster_offset >> 9) + index_in_cluster,
-                         n, &hd_qiov);
-    if (cluster_data) {
-        free(cluster_data);
-        cluster_data = NULL;
-    }
     qemu_co_mutex_lock(&s->lock);
-    if (ret < 0) {
-        return ret;
-    }
-
-    acb->nb_sectors -= n;
-    acb->sector_num += n;
-    acb->buf += n * 512;
-    goto redo;
-}
+    while (nb_sectors != 0) {
 
-static int qcow_co_writev(BlockDriverState *bs, int64_t sector_num,
-                          int nb_sectors, QEMUIOVector *qiov)
-{
-    BDRVQcowState *s = bs->opaque;
-    QCowAIOCB acb;
-    int ret;
-
-    s->cluster_cache_offset = -1; /* disable compressed cache */
+        index_in_cluster = sector_num & (s->cluster_sectors - 1);
+        n = s->cluster_sectors - index_in_cluster;
+        if (n > nb_sectors) {
+            n = nb_sectors;
+        }
+        cluster_offset = get_cluster_offset(bs, sector_num << 9, 1, 0,
+                                            index_in_cluster,
+                                            index_in_cluster + n);
+        if (!cluster_offset || (cluster_offset & 511) != 0) {
+            ret = -EIO;
+            break;
+        }
+        if (s->crypt_method) {
+            if (!cluster_data) {
+                cluster_data = g_malloc0(s->cluster_size);
+            }
+            encrypt_sectors(s, sector_num, cluster_data, buf,
+                            n, 1, &s->aes_encrypt_key);
+            src_buf = cluster_data;
+        } else {
+            src_buf = buf;
+        }
 
-    qcow_aio_setup(bs, sector_num, qiov, nb_sectors, 1, &acb);
+        hd_iov.iov_base = (void *)src_buf;
+        hd_iov.iov_len = n * 512;
+        qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
+        qemu_co_mutex_unlock(&s->lock);
+        ret = bdrv_co_writev(bs->file,
+                             (cluster_offset >> 9) + index_in_cluster,
+                             n, &hd_qiov);
+        qemu_co_mutex_lock(&s->lock);
+        if (ret < 0) {
+            break;
+        }
+        ret = 0;
 
-    qemu_co_mutex_lock(&s->lock);
-    ret = qcow_aio_write_cb(&acb);
+        nb_sectors -= n;
+        sector_num += n;
+        buf += n * 512;
+    }
     qemu_co_mutex_unlock(&s->lock);
 
-    if (acb.qiov->niov > 1) {
-        qemu_vfree(acb.orig_buf);
+    if (qiov->niov > 1) {
+        qemu_vfree(orig_buf);
     }
+    free(cluster_data);
 
     return ret;
 }
```
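The shape of the change is easier to see outside the diff: the per-request state that `qcow_aio_setup()` packed into a `QCowAIOCB` becomes plain local variables, and the callback body that re-entered itself via `goto redo` becomes a `while (nb_sectors != 0)` loop inside the coroutine function, which drops `s->lock` only around the blocking `bdrv_co_readv()`/`bdrv_co_writev()` calls. The sketch below illustrates just that callback-to-loop conversion in isolation; it is not QEMU code, and every name in it (`req_state`, `process_cb`, `process_inline`, `do_one_chunk`) is hypothetical.

```c
/* Illustrative sketch only: the general callback-to-loop reshaping, not QEMU code. */
#include <stdint.h>

/* Stand-in for one bounded unit of I/O; always succeeds in this sketch. */
static int do_one_chunk(int64_t sector, int n, uint8_t *buf)
{
    (void)sector; (void)n; (void)buf;
    return 0;
}

/* Before: per-request state lives in a struct and the callback loops via goto. */
struct req_state {
    int64_t sector;
    int nb_sectors;
    uint8_t *buf;
};

static int process_cb(struct req_state *st)
{
redo:
    if (st->nb_sectors == 0) {
        return 0;                      /* request completed */
    }
    if (do_one_chunk(st->sector, 1, st->buf) < 0) {
        return -1;
    }
    st->nb_sectors -= 1;
    st->sector += 1;
    st->buf += 512;
    goto redo;
}

/* After: the same work expressed as a while loop over local variables. */
static int process_inline(int64_t sector, int nb_sectors, uint8_t *buf)
{
    int ret = 0;

    while (nb_sectors != 0) {
        ret = do_one_chunk(sector, 1, buf);
        if (ret < 0) {
            break;                     /* fall through to shared cleanup */
        }
        nb_sectors -= 1;
        sector += 1;
        buf += 512;
    }
    return ret;
}

int main(void)
{
    uint8_t buf[4 * 512] = { 0 };
    struct req_state st = { .sector = 0, .nb_sectors = 4, .buf = buf };

    int a = process_cb(&st);
    int b = process_inline(0, 4, buf);
    return (a == 0 && b == 0) ? 0 : 1;
}
```

In the real patch the same reshaping lets error paths use `break` or `goto fail` and funnel into a single unlock-and-cleanup tail at the end of each function, instead of scattering early returns through a callback.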