author    Kevin Wolf <kwolf@redhat.com>  2016-03-08 13:47:47 +0100
committer Kevin Wolf <kwolf@redhat.com>  2016-03-17 15:47:57 +0100
commit    1bf1cbc91f3575ea27e33d0cd2cc49db81ffb2f6
tree      fe0f5add533ecaf8f14e9caffe57a09b69cc9d13  /block/block-backend.c
parent    f21d96d04b6949c1a5b4a24c73a296a1bc4bdad6
block: Use blk_co_preadv() for blk_read()
This patch introduces blk_co_preadv() as a central function on the BlockBackend level that is supposed to eventually handle all read requests from the BB to its root BDS.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
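As an illustration of the intended usage, a caller that is already running in coroutine context could drive a byte-granularity read through the new function roughly as follows. This is only a sketch: blk_co_preadv() is still static to block-backend.c at this point in the series, and the helper name blk_co_pread_buf() is hypothetical; the QEMUIOVector setup mirrors what the reworked blk_read() below does.

/* Hypothetical helper, assuming coroutine context inside block-backend.c
 * (blk_co_preadv() is not exported yet at this point in the series). */
static int coroutine_fn blk_co_pread_buf(BlockBackend *blk, int64_t offset,
                                         unsigned int bytes, void *buf)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = buf,
        .iov_len  = bytes,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);

    /* No request flags: a plain read of 'bytes' bytes starting at 'offset' */
    return blk_co_preadv(blk, offset, bytes, &qiov, 0);
}

Non-coroutine callers cannot invoke blk_co_preadv() directly; the reworked blk_read() in the hunk below instead spawns a coroutine running blk_read_entry() and polls the BlockBackend's AioContext until the NOT_DONE marker is overwritten with the real return value.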
Diffstat (limited to 'block/block-backend.c')
 -rw-r--r--  block/block-backend.c  64
 1 file changed, 60 insertions, 4 deletions
diff --git a/block/block-backend.c b/block/block-backend.c
index 5de785026b..c05878526a 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -22,6 +22,8 @@
 /* Number of coroutines to reserve per attached device model */
 #define COROUTINE_POOL_RESERVATION 64
 
+#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
+
 static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);
 
 struct BlockBackend {
@@ -693,15 +695,69 @@ static int blk_check_request(BlockBackend *blk, int64_t sector_num,
                                   nb_sectors * BDRV_SECTOR_SIZE);
 }
 
-int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
-             int nb_sectors)
+static int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
+                                      unsigned int bytes, QEMUIOVector *qiov,
+                                      BdrvRequestFlags flags)
 {
-    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    int ret = blk_check_byte_request(blk, offset, bytes);
     if (ret < 0) {
         return ret;
     }
 
-    return bdrv_read(blk_bs(blk), sector_num, buf, nb_sectors);
+    return bdrv_co_do_preadv(blk_bs(blk), offset, bytes, qiov, flags);
+}
+
+typedef struct BlkRwCo {
+    BlockBackend *blk;
+    int64_t offset;
+    QEMUIOVector *qiov;
+    int ret;
+    BdrvRequestFlags flags;
+} BlkRwCo;
+
+static void blk_read_entry(void *opaque)
+{
+    BlkRwCo *rwco = opaque;
+
+    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, rwco->qiov->size,
+                              rwco->qiov, rwco->flags);
+}
+
+int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
+             int nb_sectors)
+{
+    AioContext *aio_context;
+    QEMUIOVector qiov;
+    struct iovec iov;
+    Coroutine *co;
+    BlkRwCo rwco;
+
+    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
+        return -EINVAL;
+    }
+
+    iov = (struct iovec) {
+        .iov_base = buf,
+        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
+    };
+    qemu_iovec_init_external(&qiov, &iov, 1);
+
+    rwco = (BlkRwCo) {
+        .blk = blk,
+        .offset = sector_num << BDRV_SECTOR_BITS,
+        .qiov = &qiov,
+        .ret = NOT_DONE,
+    };
+
+    co = qemu_coroutine_create(blk_read_entry);
+    qemu_coroutine_enter(co, &rwco);
+
+    aio_context = blk_get_aio_context(blk);
+    while (rwco.ret == NOT_DONE) {
+        aio_poll(aio_context, true);
+    }
+
+    return rwco.ret;
 }
 
 int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,