author     Kevin Wolf <kwolf@redhat.com>    2014-07-01 16:09:54 +0200
committer  Kevin Wolf <kwolf@redhat.com>    2014-07-14 12:03:20 +0200
commit     8eb029c26ecf61da6c2b45c3c23472e8c477ba34 (patch)
tree       33de0b160ce79aac82971c308b748adc96df0d71
parent     f06ee3d4aa547df8d7d2317b2b6db7a88c1f3744 (diff)
block: Assert qiov length matches request length
At least raw-posix relies on this because it can allocate a bounce buffer based on the request length, but access it using all of the qiov entries later.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
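To make the failure mode concrete, here is a minimal standalone sketch (not QEMU code; the struct and function names fake_iov and bounce_copy are invented for illustration) of the bounce-buffer gather pattern used on the write path, and of the invariant the new assertions enforce: the iovec entries must add up exactly to the request length used to size the buffer.

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for an iovec entry. */
struct fake_iov {
    void   *iov_base;
    size_t  iov_len;
};

/* Gather the iovec contents into a bounce buffer sized from the request
 * length (nbytes), mirroring the copy loop in handle_aiocb_rw(). */
static void *bounce_copy(const struct fake_iov *iov, int niov, size_t nbytes)
{
    char *buf = malloc(nbytes);   /* sized from the request length only */
    char *p = buf;
    int i;

    for (i = 0; i < niov; i++) {
        memcpy(p, iov[i].iov_base, iov[i].iov_len);
        p += iov[i].iov_len;
    }

    /* Same invariant the patch asserts: if the iovec total does not match
     * the request length, the loop above has already written past the end
     * of buf (or left part of it unfilled). */
    assert((size_t)(p - buf) == nbytes);
    return buf;
}

int main(void)
{
    char a[512] = {0}, b[512] = {0};
    struct fake_iov iov[2] = {
        { a, sizeof(a) },
        { b, sizeof(b) },
    };

    /* OK: 2 * 512 bytes of iovec for a 1024-byte request. */
    free(bounce_copy(iov, 2, 1024));

    /* Would trip the assertion (and has already overrun the buffer):
     * a 512-byte request described by 1024 bytes of iovec. */
    /* free(bounce_copy(iov, 2, 512)); */

    return 0;
}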
-rw-r--r--  block.c             2
-rw-r--r--  block/raw-posix.c  15
2 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/block.c b/block.c
index 0143268279..3e252a26c2 100644
--- a/block.c
+++ b/block.c
@@ -3010,6 +3010,7 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
 
     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+    assert(!qiov || bytes == qiov->size);
 
     /* Handle Copy on Read and associated serialisation */
     if (flags & BDRV_REQ_COPY_ON_READ) {
@@ -3279,6 +3280,7 @@ static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
 
     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+    assert(!qiov || bytes == qiov->size);
 
     waited = wait_serialising_requests(req);
     assert(!waited || !req->serialising);
diff --git a/block/raw-posix.c b/block/raw-posix.c
index a857def671..2bcc73dc37 100644
--- a/block/raw-posix.c
+++ b/block/raw-posix.c
@@ -790,6 +790,7 @@ static ssize_t handle_aiocb_rw(RawPosixAIOData *aiocb)
             memcpy(p, aiocb->aio_iov[i].iov_base, aiocb->aio_iov[i].iov_len);
             p += aiocb->aio_iov[i].iov_len;
         }
+        assert(p - buf == aiocb->aio_nbytes);
     }
 
     nbytes = handle_aiocb_rw_linear(aiocb, buf);
@@ -804,9 +805,11 @@ static ssize_t handle_aiocb_rw(RawPosixAIOData *aiocb)
                 copy = aiocb->aio_iov[i].iov_len;
             }
             memcpy(aiocb->aio_iov[i].iov_base, p, copy);
+            assert(count >= copy);
             p += copy;
             count -= copy;
         }
+        assert(count == 0);
     }
 
     qemu_vfree(buf);
@@ -993,12 +996,14 @@ static int paio_submit_co(BlockDriverState *bs, int fd,
     acb->aio_type = type;
     acb->aio_fildes = fd;
 
+    acb->aio_nbytes = nb_sectors * BDRV_SECTOR_SIZE;
+    acb->aio_offset = sector_num * BDRV_SECTOR_SIZE;
+
     if (qiov) {
         acb->aio_iov = qiov->iov;
         acb->aio_niov = qiov->niov;
+        assert(qiov->size == acb->aio_nbytes);
     }
-    acb->aio_nbytes = nb_sectors * 512;
-    acb->aio_offset = sector_num * 512;
 
     trace_paio_submit_co(sector_num, nb_sectors, type);
     pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
@@ -1016,12 +1021,14 @@ static BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd,
     acb->aio_type = type;
     acb->aio_fildes = fd;
 
+    acb->aio_nbytes = nb_sectors * BDRV_SECTOR_SIZE;
+    acb->aio_offset = sector_num * BDRV_SECTOR_SIZE;
+
     if (qiov) {
         acb->aio_iov = qiov->iov;
         acb->aio_niov = qiov->niov;
+        assert(qiov->size == acb->aio_nbytes);
     }
-    acb->aio_nbytes = nb_sectors * 512;
-    acb->aio_offset = sector_num * 512;
 
     trace_paio_submit(acb, opaque, sector_num, nb_sectors, type);
     pool = aio_get_thread_pool(bdrv_get_aio_context(bs));