Diffstat (limited to 'qemu-img.c')
 qemu-img.c | 44 ++++++++++++++++++++++++++++++++++++++------
 1 file changed, 38 insertions(+), 6 deletions(-)
diff --git a/qemu-img.c b/qemu-img.c
index f4074ebf75..4a7ce43dc9 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -1105,11 +1105,15 @@ static int64_t find_nonzero(const uint8_t *buf, int64_t n)
*
* 'pnum' is set to the number of sectors (including and immediately following
* the first one) that are known to be in the same allocated/unallocated state.
+ * The function tries to align the end offset to alignment boundaries so
+ * that the request at least ends aligned and consecutive requests will
+ * also start at an aligned offset.
*/
-static int is_allocated_sectors(const uint8_t *buf, int n, int *pnum)
+static int is_allocated_sectors(const uint8_t *buf, int n, int *pnum,
+ int64_t sector_num, int alignment)
{
bool is_zero;
- int i;
+ int i, tail;
if (n <= 0) {
*pnum = 0;
@@ -1122,6 +1126,23 @@ static int is_allocated_sectors(const uint8_t *buf, int n, int *pnum)
break;
}
}
+
+ tail = (sector_num + i) & (alignment - 1);
+ if (tail) {
+ if (is_zero && i <= tail) {
+ /* treat unallocated areas which only consist
+ * of a small tail as allocated. */
+ is_zero = false;
+ }
+ if (!is_zero) {
+ /* align up end offset of allocated areas. */
+ i += alignment - tail;
+ i = MIN(i, n);
+ } else {
+ /* align down end offset of zero areas. */
+ i -= tail;
+ }
+ }
*pnum = i;
return !is_zero;
}
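
To make the rounding above concrete, here is a minimal standalone sketch of
the same end-offset logic; the helper name round_end, the MIN macro and the
test values are assumptions for illustration, not part of the patch.

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* 'i' is the length of the first run found in the buffer, 'n' the number
 * of sectors scanned, and 'alignment' must be a power of two (img_convert
 * asserts this). */
static int round_end(long sector_num, int i, int n, int is_zero, int alignment)
{
    int tail = (sector_num + i) & (alignment - 1);

    if (tail) {
        if (is_zero && i <= tail) {
            /* a zero run no longer than the tail is treated as data */
            is_zero = 0;
        }
        if (!is_zero) {
            /* data: round the end offset up to the next boundary */
            i += alignment - tail;
            i = MIN(i, n);
        } else {
            /* zeroes: round the end offset down to a boundary */
            i -= tail;
        }
    }
    return i;
}

int main(void)
{
    /* a 5-sector data run at sector 0 with alignment 8 is extended to 8,
     * so the next request starts on an aligned offset */
    printf("%d\n", round_end(0, 5, 16, 0, 8));  /* prints 8 */
    /* a 13-sector zero run at sector 0 is trimmed down to 8 */
    printf("%d\n", round_end(0, 13, 16, 1, 8)); /* prints 8 */
    return 0;
}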
@@ -1132,7 +1153,7 @@ static int is_allocated_sectors(const uint8_t *buf, int n, int *pnum)
* breaking up write requests for only small sparse areas.
*/
static int is_allocated_sectors_min(const uint8_t *buf, int n, int *pnum,
- int min)
+ int min, int64_t sector_num, int alignment)
{
int ret;
int num_checked, num_used;
@@ -1141,7 +1162,7 @@ static int is_allocated_sectors_min(const uint8_t *buf, int n, int *pnum,
min = n;
}
- ret = is_allocated_sectors(buf, n, pnum);
+ ret = is_allocated_sectors(buf, n, pnum, sector_num, alignment);
if (!ret) {
return ret;
}
@@ -1149,13 +1170,15 @@ static int is_allocated_sectors_min(const uint8_t *buf, int n, int *pnum,
num_used = *pnum;
buf += BDRV_SECTOR_SIZE * *pnum;
n -= *pnum;
+ sector_num += *pnum;
num_checked = num_used;
while (n > 0) {
- ret = is_allocated_sectors(buf, n, pnum);
+ ret = is_allocated_sectors(buf, n, pnum, sector_num, alignment);
buf += BDRV_SECTOR_SIZE * *pnum;
n -= *pnum;
+ sector_num += *pnum;
num_checked += *pnum;
if (ret) {
num_used = num_checked;
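
The hunk above only threads sector_num through the scan loop, but the
min-sparse behaviour it feeds is easy to model standalone: zero runs shorter
than 'min' are folded into the surrounding data so writes are not split up.
The sketch below uses one byte per sector (0 = zero, 1 = data) instead of
real 512-byte sectors, and min_sparse_scan is a made-up name, not the
patch's code.

#include <stdbool.h>
#include <stdio.h>

static bool min_sparse_scan(const char *map, int n, int *pnum, int min)
{
    int i = 0;

    /* length of the first run of equal sectors */
    while (i < n && map[i] == map[0]) {
        i++;
    }
    if (!map[0]) {
        *pnum = i;          /* leading zero run: report it as sparse */
        return false;
    }

    int num_used = i;       /* data seen so far */
    while (i < n) {
        int start = i;
        while (i < n && map[i] == map[start]) {
            i++;
        }
        if (map[start]) {
            num_used = i;   /* data run: extend the chunk */
        } else if (i - start >= min) {
            break;          /* zero gap of at least 'min': split here */
        }                   /* shorter zero gaps are swallowed */
    }
    *pnum = num_used;
    return true;
}

int main(void)
{
    /* 4 data sectors, a 2-sector hole, 3 data sectors, min = 4:
     * the hole is swallowed and one 9-sector write results */
    const char map[] = {1, 1, 1, 1, 0, 0, 1, 1, 1};
    int pnum;
    bool used = min_sparse_scan(map, 9, &pnum, 4);
    printf("used=%d pnum=%d\n", used, pnum);  /* prints used=1 pnum=9 */
    return 0;
}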
@@ -1560,6 +1583,7 @@ typedef struct ImgConvertState {
bool wr_in_order;
bool copy_range;
int min_sparse;
+ int alignment;
size_t cluster_sectors;
size_t buf_sectors;
long num_coroutines;
@@ -1724,7 +1748,8 @@ static int coroutine_fn convert_co_write(ImgConvertState *s, int64_t sector_num,
* zeroed. */
if (!s->min_sparse ||
(!s->compressed &&
- is_allocated_sectors_min(buf, n, &n, s->min_sparse)) ||
+ is_allocated_sectors_min(buf, n, &n, s->min_sparse,
+ sector_num, s->alignment)) ||
(s->compressed &&
!buffer_is_zero(buf, n * BDRV_SECTOR_SIZE)))
{
@@ -2368,6 +2393,13 @@ static int img_convert(int argc, char **argv)
out_bs->bl.pdiscard_alignment >>
BDRV_SECTOR_BITS)));
+ /* try to align the write requests to the destination to avoid unnecessary
+ * RMW cycles. */
+ s.alignment = MAX(pow2floor(s.min_sparse),
+ DIV_ROUND_UP(out_bs->bl.request_alignment,
+ BDRV_SECTOR_SIZE));
+ assert(is_power_of_2(s.alignment));
+
if (skip_create) {
int64_t output_sectors = blk_nb_sectors(s.target);
if (output_sectors < 0) {
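
As a rough worked example of the final alignment computation (the macros
approximate qemu/osdep.h and pow2floor mirrors qemu/host-utils.h; the input
numbers are assumed, not taken from the patch):

#include <stdio.h>

#define BDRV_SECTOR_SIZE 512
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* largest power of two <= value */
static unsigned long pow2floor(unsigned long value)
{
    while (value & (value - 1)) {
        value &= value - 1;   /* clear the lowest set bit */
    }
    return value;
}

int main(void)
{
    int min_sparse = 8;            /* default -S of 4k, in 512-byte sectors */
    int request_alignment = 4096;  /* e.g. a 4k-sector destination */
    int alignment = MAX(pow2floor(min_sparse),
                        DIV_ROUND_UP(request_alignment, BDRV_SECTOR_SIZE));
    /* both operands are powers of two here, so the assert would hold */
    printf("alignment = %d sectors\n", alignment);  /* prints 8 */
    return 0;
}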