From 9e13d598843cca1cdab7b7bdcb9cc0868ebf7fed Mon Sep 17 00:00:00 2001
From: Eric Auger
Date: Thu, 29 Oct 2020 10:33:03 +0100
Subject: block/nvme: Align iov's va and size on host page size
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Make sure iov's va and size are properly aligned on the host page size.

Signed-off-by: Eric Auger
Reviewed-by: Philippe Mathieu-Daudé
Reviewed-by: Stefan Hajnoczi
Tested-by: Eric Auger
Signed-off-by: Philippe Mathieu-Daudé
Message-id: 20201029093306.1063879-23-philmd@redhat.com
Signed-off-by: Stefan Hajnoczi
Tested-by: Eric Auger
---
 block/nvme.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

(limited to 'block/nvme.c')

diff --git a/block/nvme.c b/block/nvme.c
index e807dd56df..f1e2fd34cd 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -1015,11 +1015,12 @@ static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
     for (i = 0; i < qiov->niov; ++i) {
         bool retry = true;
         uint64_t iova;
+        size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
+                                   qemu_real_host_page_size);
 try_map:
         r = qemu_vfio_dma_map(s->vfio,
                               qiov->iov[i].iov_base,
-                              qiov->iov[i].iov_len,
-                              true, &iova);
+                              len, true, &iova);
         if (r == -ENOMEM && retry) {
             retry = false;
             trace_nvme_dma_flush_queue_wait(s);
@@ -1163,8 +1164,9 @@ static inline bool nvme_qiov_aligned(BlockDriverState *bs,
     BDRVNVMeState *s = bs->opaque;
 
     for (i = 0; i < qiov->niov; ++i) {
-        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
-            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
+        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base,
+                                 qemu_real_host_page_size) ||
+            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size)) {
             trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                       qiov->iov[i].iov_len, s->page_size);
             return false;
@@ -1180,7 +1182,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
     int r;
     uint8_t *buf = NULL;
     QEMUIOVector local_qiov;
-
+    size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size);
     assert(QEMU_IS_ALIGNED(offset, s->page_size));
     assert(QEMU_IS_ALIGNED(bytes, s->page_size));
     assert(bytes <= s->max_transfer);
@@ -1190,7 +1192,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
     }
     s->stats.unaligned_accesses++;
     trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
-    buf = qemu_try_memalign(s->page_size, bytes);
+    buf = qemu_try_memalign(qemu_real_host_page_size, len);
     if (!buf) {
         return -ENOMEM;
     }
--
cgit v1.2.3
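
[Editor's note] For readers unfamiliar with the alignment helpers used in the
hunks above, here is a minimal standalone sketch, not part of the patch, of the
rounding logic it relies on. ALIGN_UP and PTR_IS_ALIGNED below are simplified
stand-ins for QEMU's QEMU_ALIGN_UP() and QEMU_PTR_IS_ALIGNED() macros, and
sysconf(_SC_PAGESIZE) stands in for qemu_real_host_page_size; the names and
values are purely illustrative.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Round n down/up to a multiple of m (m is a page size here). */
#define ALIGN_DOWN(n, m)     ((n) / (m) * (m))
#define ALIGN_UP(n, m)       ALIGN_DOWN((n) + (m) - 1, (m))
/* True if pointer p sits on an m-byte boundary. */
#define PTR_IS_ALIGNED(p, m) (((uintptr_t)(p) % (m)) == 0)

int main(void)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE); /* host page size */
    size_t iov_len = 10000;                      /* example payload length */
    void *iov_base = malloc(iov_len);

    /* Round the mapping length up to a host-page multiple, as the patch
     * does with QEMU_ALIGN_UP() before calling qemu_vfio_dma_map(). */
    size_t map_len = ALIGN_UP(iov_len, page);

    printf("page=%zu len=%zu mapped=%zu base aligned=%s\n",
           page, iov_len, map_len,
           PTR_IS_ALIGNED(iov_base, page) ? "yes" : "no");

    free(iov_base);
    return 0;
}

With a 4 KiB host page, the 10000-byte buffer in this sketch would be mapped
as 12288 bytes (three pages), which is the effect of the extra "len" variables
introduced by the patch.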