author     Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>   2021-09-03 13:28:03 +0300
committer  Eric Blake <eblake@redhat.com>                            2021-09-29 13:46:32 -0500
commit     f34b2bcf8c393929c7d2100eb2f3909cac0d1cab (patch)
tree       4f6afd5d6e95ca267fcce80a82c06630f366e72f /block/iscsi.c
parent     d544f5d3b1df79d71eae4d7fac528f65d22265ad (diff)
block: use int64_t instead of int in driver write_zeroes handlers
We are generally moving to int64_t for both the offset and bytes parameters on all I/O paths. The main motivation is the realization of a 64-bit write_zeroes operation for fast zeroing of large disk chunks, up to the whole disk. We chose a signed type to be consistent with off_t (which is signed) and to allow for a signed return type (where a negative value means error).

So, convert the bytes parameter of the driver write_zeroes handlers to int64_t.

The only caller of all the updated functions is bdrv_co_do_pwrite_zeroes(). bdrv_co_do_pwrite_zeroes() itself is of course OK with widening the callee parameter type. Also, bdrv_co_do_pwrite_zeroes()'s max_write_zeroes is limited to INT_MAX, so the updated functions are all safe: they will not get "bytes" larger than before.

Still, let's look through all the updated functions and add assertions to the ones which are actually unprepared for values larger than INT_MAX. For these drivers also set an explicit max_pwrite_zeroes limit. (A sketch of the generic-layer clamping that makes these assertions safe follows this message.)

Let's go:

blkdebug: calculations can't overflow, thanks to bdrv_check_qiov_request() in the generic layer. rule_check() and bdrv_co_pwrite_zeroes() both have a 64-bit argument.

blklogwrites: passes to blk_log_writes_co_log(), which has a 64-bit argument.

blkreplay, copy-on-read, filter-compress: pass to bdrv_co_pwrite_zeroes(), which is OK.

copy-before-write: calls cbw_do_copy_before_write() and bdrv_co_pwrite_zeroes(); both have a 64-bit argument.

file-posix: both handlers call raw_do_pwrite_zeroes(), which is updated. In raw_do_pwrite_zeroes() the calculations are OK due to bdrv_check_qiov_request(); bytes goes into RawPosixAIOData::aio_nbytes, which is uint64_t. Check also where that uint64_t gets handed: handle_aiocb_write_zeroes_block() passes a uint64_t[2] to ioctl(BLKZEROOUT); handle_aiocb_write_zeroes() calls do_fallocate(), which takes off_t (and we compile to always have 64-bit off_t), as does handle_aiocb_write_zeroes_unmap(). All look safe.

gluster: bytes goes into GlusterAIOCB::size, which is int64_t, and to glfs_zerofill_async(), which works with off_t.

iscsi: Aha, here we deal with iscsi_writesame16_task(), whose num_blocks argument is uint32_t, and iscsi_writesame10_task(), whose num_blocks argument is uint16_t. Add comments and assertions, and clarify the max_pwrite_zeroes calculation. The iscsi_allocmap_*() functions already take an int64_t argument. is_byte_request_lun_aligned() is simple to update, so do it.

mirror_top: passes to bdrv_mirror_top_do_write(), which has a uint64_t argument.

nbd: Aha, here we have a protocol limitation, and NBDRequest::len is uint32_t. max_pwrite_zeroes is cleanly set to a 32-bit value, so we are OK for now.

nvme: Again, a protocol limitation, and no inherent limit for write-zeroes at all. But from the code that calculates cdw12 it's obvious that we do have a limit and an alignment; let's clarify it. Also, the code is obviously not prepared to handle bytes=0; handle this case too. The trace events are already 64-bit.

preallocate: passes to handle_write() and bdrv_co_pwrite_zeroes(), both 64-bit.

rbd: passes to qemu_rbd_start_co(), which is 64-bit.

qcow2: offset + bytes and the alignment still work well (thanks to bdrv_check_qiov_request()), so the tail calculation is OK. qcow2_subcluster_zeroize() has a 64-bit argument, so it should be OK. Trace events updated.

qed: qed_co_request() wants an int nb_sectors. Also, the code uses size_t for the request length, which may be 32-bit. So, let's just keep INT_MAX as the limit (aligning it down to pwrite_zeroes_alignment) and not worry about it.

raw-format: is OK. raw_adjust_offset() and bdrv_co_pwrite_zeroes() are both 64-bit.

throttle: both throttle_group_co_io_limits_intercept() and bdrv_co_pwrite_zeroes() are 64-bit.

vmdk: passes to vmdk_pwritev(), which is 64-bit.

quorum: passes to quorum_co_pwritev(), which is 64-bit.

Hooray!

At this point all block drivers are prepared to support 64-bit write-zeroes requests, or have explicitly set max_pwrite_zeroes.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20210903102807.27127-8-vsementsov@virtuozzo.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
[eblake: use <= rather than < in assertions relying on max_pwrite_zeroes]
Signed-off-by: Eric Blake <eblake@redhat.com>
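For illustration only, here is a minimal sketch of the clamping behaviour the message relies on. It is not the real bdrv_co_do_pwrite_zeroes() (which also handles alignment, flags, and fallback to regular writes), and it assumes QEMU's internal block_int.h declarations (BlockDriverState, BdrvRequestFlags, bs->bl.max_pwrite_zeroes, the MIN macro). The point is only that a driver callback is never handed more than its advertised max_pwrite_zeroes, or INT_MAX when no limit is set, so the per-driver assertions added in this series cannot fire.

/*
 * Simplified sketch of the generic-layer clamping described above; not the
 * actual bdrv_co_do_pwrite_zeroes() implementation.
 */
static int coroutine_fn pwrite_zeroes_sketch(BlockDriverState *bs,
                                             int64_t offset, int64_t bytes,
                                             BdrvRequestFlags flags)
{
    /* 0 means "no limit"; chunks are still capped at INT_MAX. */
    int64_t max = bs->bl.max_pwrite_zeroes ? bs->bl.max_pwrite_zeroes
                                           : INT_MAX;

    while (bytes > 0) {
        int64_t num = MIN(bytes, max);     /* respect the driver's limit */
        int ret = bs->drv->bdrv_co_pwrite_zeroes(bs, offset, num, flags);

        if (ret < 0) {
            return ret;
        }
        offset += num;
        bytes -= num;
    }
    return 0;
}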
Diffstat (limited to 'block/iscsi.c')
-rw-r--r--  block/iscsi.c | 30
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/block/iscsi.c b/block/iscsi.c
index 01fdd1775f..74ff7e307e 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -427,14 +427,14 @@ static int64_t sector_qemu2lun(int64_t sector, IscsiLun *iscsilun)
return sector * BDRV_SECTOR_SIZE / iscsilun->block_size;
}
-static bool is_byte_request_lun_aligned(int64_t offset, int count,
+static bool is_byte_request_lun_aligned(int64_t offset, int64_t bytes,
IscsiLun *iscsilun)
{
- if (offset % iscsilun->block_size || count % iscsilun->block_size) {
+ if (offset % iscsilun->block_size || bytes % iscsilun->block_size) {
error_report("iSCSI misaligned request: "
"iscsilun->block_size %u, offset %" PRIi64
- ", count %d",
- iscsilun->block_size, offset, count);
+ ", bytes %" PRIi64,
+ iscsilun->block_size, offset, bytes);
return false;
}
return true;
@@ -1202,12 +1202,12 @@ out_unlock:
static int
coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
- int bytes, BdrvRequestFlags flags)
+ int64_t bytes, BdrvRequestFlags flags)
{
IscsiLun *iscsilun = bs->opaque;
struct IscsiTask iTask;
uint64_t lba;
- uint32_t nb_blocks;
+ uint64_t nb_blocks;
bool use_16_for_ws = iscsilun->use_16_for_rw;
int r = 0;
@@ -1247,11 +1247,21 @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
if (use_16_for_ws) {
+ /*
+ * iscsi_writesame16_task num_blocks argument is uint32_t. We rely here
+ * on our max_pwrite_zeroes limit.
+ */
+ assert(nb_blocks <= UINT32_MAX);
iTask.task = iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba,
iscsilun->zeroblock, iscsilun->block_size,
nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
0, 0, iscsi_co_generic_cb, &iTask);
} else {
+ /*
+ * iscsi_writesame10_task num_blocks argument is uint16_t. We rely here
+ * on our max_pwrite_zeroes limit.
+ */
+ assert(nb_blocks <= UINT16_MAX);
iTask.task = iscsi_writesame10_task(iscsilun->iscsi, iscsilun->lun, lba,
iscsilun->zeroblock, iscsilun->block_size,
nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
@@ -2071,10 +2081,10 @@ static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp)
bs->bl.pdiscard_alignment = iscsilun->block_size;
}
- if (iscsilun->bl.max_ws_len < 0xffffffff / block_size) {
- bs->bl.max_pwrite_zeroes =
- iscsilun->bl.max_ws_len * iscsilun->block_size;
- }
+ bs->bl.max_pwrite_zeroes =
+ MIN_NON_ZERO(iscsilun->bl.max_ws_len * iscsilun->block_size,
+ max_xfer_len * iscsilun->block_size);
+
if (iscsilun->lbp.lbpws) {
bs->bl.pwrite_zeroes_alignment =
iscsilun->bl.opt_unmap_gran * iscsilun->block_size;
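For reference, a hedged sketch of how the new max_pwrite_zeroes calculation above ties into the nb_blocks assertions. The MIN_NON_ZERO definition below is an approximation written from memory of QEMU's osdep.h helper, not copied from it; max_ws_len and max_xfer_len are the iSCSI block-limits fields used in the hunk above.

/*
 * Approximation of QEMU's MIN_NON_ZERO(): zero means "unlimited", so the
 * result is the smaller of the non-zero operands (or 0 if both are zero).
 */
#define MIN_NON_ZERO(a, b) \
    ((a) == 0 ? (b) : ((b) == 0 || (b) > (a) ? (a) : (b)))

/*
 * With
 *     bs->bl.max_pwrite_zeroes = MIN_NON_ZERO(max_ws_len * block_size,
 *                                             max_xfer_len * block_size)
 * the generic layer never passes iscsi_co_pwrite_zeroes() more than that
 * many bytes, so nb_blocks = bytes / block_size is bounded by the smaller
 * of max_ws_len and max_xfer_len. That is the limit the new
 * assert(nb_blocks <= UINT32_MAX) and assert(nb_blocks <= UINT16_MAX)
 * rely on, as noted in the comments added in the hunk above.
 */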