author     Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>   2012-01-18 14:40:42 +0000
committer  Kevin Wolf <kwolf@redhat.com>                   2012-01-26 11:45:26 +0100
commit     470c05047a09cda3de16bb3f98a130d9537357a4
tree       c101557e6ff36ca1fe1465884062433358f85714 /block.c
parent     2d3735d3bf61d5c8e154a197a11535cc65044334
block: make copy-on-read a per-request flag
Previously, copy-on-read could only be enabled for all requests to a
block device, so both guest requests and QEMU's internal requests
performed copy-on-read whenever the feature was enabled.
For image streaming we want finer-grained behavior than simply
populating the image file from its backing image. Image streaming
supports partial streaming, where a common backing image is preserved.
In that case guest requests must not perform copy-on-read, because they
would indiscriminately copy data out of the backing chain that is meant
to stay in the preserved backing image. For example, when streaming the
chain base <- mid <- top only down to mid, data that lives in base must
remain in base.
Introduce a per-request flag for copy-on-read so that a block device can
process both regular and copy-on-read requests. Overlapping reads and
writes still need to be serialized for correctness when copy-on-read is
happening, so add an in-flight reference count to track this.
Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
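To make the serialization rule concrete, here is a minimal
self-contained C model of the logic this patch adds. BlockState,
read_begin(), and read_end() are illustrative names rather than QEMU
APIs; in the real patch this logic lives inside bdrv_co_do_readv().

/* A minimal, self-contained model of the patch's serialization rule.
 * BlockState, read_begin(), and read_end() are illustrative names,
 * not QEMU APIs. */
#include <stdbool.h>
#include <stdio.h>

typedef enum {
    BDRV_REQ_COPY_ON_READ = 0x1,
} BdrvRequestFlags;

typedef struct {
    bool copy_on_read;          /* device-wide setting */
    int copy_on_read_in_flight; /* reference count added by this patch */
} BlockState;

/* Begin a read request; returns true if it must wait for overlapping
 * requests, i.e. some copy-on-read request is currently in flight. */
static bool read_begin(BlockState *bs, BdrvRequestFlags *flags)
{
    if (bs->copy_on_read) {
        *flags |= BDRV_REQ_COPY_ON_READ; /* device setting folds into the request */
    }
    if (*flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight++;
    }
    return bs->copy_on_read_in_flight > 0;
}

/* End a read request; drops the in-flight reference if it held one. */
static void read_end(BlockState *bs, BdrvRequestFlags flags)
{
    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight--;
    }
}

int main(void)
{
    BlockState bs = { .copy_on_read = false, .copy_on_read_in_flight = 0 };
    BdrvRequestFlags plain = 0, cor = BDRV_REQ_COPY_ON_READ;

    printf("plain read waits: %d\n", read_begin(&bs, &plain)); /* 0 */
    printf("COR read waits:   %d\n", read_begin(&bs, &cor));   /* 1 */
    printf("plain read waits: %d\n", read_begin(&bs, &plain)); /* 1: COR in flight */

    read_end(&bs, plain);
    read_end(&bs, cor);
    read_end(&bs, plain);
    return 0;
}

Counting in-flight copy-on-read requests, rather than keying off the
device-wide setting, lets plain guest reads skip serialization entirely
whenever no copy-on-read request is active.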
Diffstat (limited to 'block.c')
-rw-r--r--   block.c   49
1 file changed, 38 insertions(+), 11 deletions(-)
diff --git a/block.c b/block.c
--- a/block.c
+++ b/block.c
@@ -48,6 +48,10 @@
 
 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
 
+typedef enum {
+    BDRV_REQ_COPY_ON_READ = 0x1,
+} BdrvRequestFlags;
+
 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
@@ -62,7 +66,8 @@ static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                           int64_t sector_num, int nb_sectors,
                                           QEMUIOVector *iov);
 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
-    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+    BdrvRequestFlags flags);
 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
@@ -1292,7 +1297,7 @@ static void coroutine_fn bdrv_rw_co_entry(void *opaque)
 
     if (!rwco->is_write) {
         rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
-                                     rwco->nb_sectors, rwco->qiov);
+                                     rwco->nb_sectors, rwco->qiov, 0);
     } else {
         rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
                                       rwco->nb_sectors, rwco->qiov);
@@ -1500,7 +1505,7 @@ int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
     return 0;
 }
 
-static int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
+static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
 {
     /* Perform I/O through a temporary buffer so that users who scribble over
@@ -1523,8 +1528,8 @@ static int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
     round_to_clusters(bs, sector_num, nb_sectors,
                       &cluster_sector_num, &cluster_nb_sectors);
 
-    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors,
-                                cluster_sector_num, cluster_nb_sectors);
+    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
+                                   cluster_sector_num, cluster_nb_sectors);
 
     iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
     iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
@@ -1559,7 +1564,8 @@ err:
  * Handle a read request in coroutine context
  */
 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
-    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
+    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+    BdrvRequestFlags flags)
 {
     BlockDriver *drv = bs->drv;
     BdrvTrackedRequest req;
@@ -1578,12 +1584,19 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
     }
 
     if (bs->copy_on_read) {
+        flags |= BDRV_REQ_COPY_ON_READ;
+    }
+
+    if (flags & BDRV_REQ_COPY_ON_READ) {
+        bs->copy_on_read_in_flight++;
+    }
+
+    if (bs->copy_on_read_in_flight) {
         wait_for_overlapping_requests(bs, sector_num, nb_sectors);
     }
 
     tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
 
-    if (bs->copy_on_read) {
+    if (flags & BDRV_REQ_COPY_ON_READ) {
         int pnum;
 
         ret = bdrv_co_is_allocated(bs, sector_num, nb_sectors, &pnum);
@@ -1592,7 +1605,7 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
         }
 
         if (!ret || pnum != nb_sectors) {
-            ret = bdrv_co_copy_on_readv(bs, sector_num, nb_sectors, qiov);
+            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
             goto out;
         }
     }
@@ -1601,6 +1614,11 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
 
 out:
     tracked_request_end(&req);
+
+    if (flags & BDRV_REQ_COPY_ON_READ) {
+        bs->copy_on_read_in_flight--;
+    }
+
     return ret;
 }
 
@@ -1609,7 +1627,16 @@ int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
 {
     trace_bdrv_co_readv(bs, sector_num, nb_sectors);
 
-    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov);
+    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
+}
+
+int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
+    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
+{
+    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
+
+    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
+                            BDRV_REQ_COPY_ON_READ);
 }
 
 /*
@@ -1637,7 +1664,7 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         bdrv_io_limits_intercept(bs, true, nb_sectors);
     }
 
-    if (bs->copy_on_read) {
+    if (bs->copy_on_read_in_flight) {
         wait_for_overlapping_requests(bs, sector_num, nb_sectors);
     }
 
@@ -3144,7 +3171,7 @@ static void coroutine_fn bdrv_co_do_rw(void *opaque)
 
     if (!acb->is_write) {
         acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
-            acb->req.nb_sectors, acb->req.qiov);
+            acb->req.nb_sectors, acb->req.qiov, 0);
     } else {
         acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
             acb->req.nb_sectors, acb->req.qiov);
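The newly exported bdrv_co_copy_on_readv() is intended for
QEMU-internal callers such as the image streaming coroutine, while
guest I/O continues to go through bdrv_co_readv(). A sketch of that
call pattern, assuming a hypothetical caller stream_populate() that is
not part of this patch:

/* Hypothetical internal caller: reading through the
 * BDRV_REQ_COPY_ON_READ request flag populates bs with the data,
 * regardless of the device-wide bs->copy_on_read setting. */
static int coroutine_fn stream_populate(BlockDriverState *bs,
                                        int64_t sector_num, int nb_sectors,
                                        QEMUIOVector *qiov)
{
    return bdrv_co_copy_on_readv(bs, sector_num, nb_sectors, qiov);
}

Because copy-on-read is now a request property, such a caller copies
data into the image even when the guest-visible setting is off, and
plain guest reads no longer trigger copy-on-read during partial
streaming.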