path: root/block/mirror.c
author     Eric Blake <eblake@redhat.com>   2017-09-25 09:55:18 -0500
committer  Kevin Wolf <kwolf@redhat.com>    2017-10-06 16:28:58 +0200
commit     9a46dba7b76f5198555819905d1d8235947ba98f
tree       b61387e074642d58b367fec6a28eb2baaf6fe36e /block/mirror.c
parent     f798184cfdcb7f92a38c5f717d675bd75e1fd3ac
dirty-bitmap: Change bdrv_get_dirty_count() to report bytes
Thanks to recent cleanups, all callers were scaling a return value of sectors into bytes; do the scaling internally instead.

Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
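To show the interface change in isolation, here is a minimal standalone C sketch of moving a sectors-to-bytes scaling out of every caller and into the accessor itself, as the message above describes. The names SECTOR_SIZE, get_dirty_count_sectors() and get_dirty_count_bytes() are hypothetical stand-ins for illustration only; they are not the QEMU API.

/*
 * Hypothetical stand-ins for illustration; the real code uses
 * bdrv_get_dirty_count() and BDRV_SECTOR_SIZE inside QEMU.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE 512

/* Pretend bitmap state: six dirty sectors. */
static int64_t dirty_sectors = 6;

/* Old style: the accessor reports sectors, so every caller must scale. */
static int64_t get_dirty_count_sectors(void)
{
    return dirty_sectors;
}

/* New style: the accessor scales internally and reports bytes. */
static int64_t get_dirty_count_bytes(void)
{
    return dirty_sectors * SECTOR_SIZE;
}

int main(void)
{
    /* Before the change, call sites had to scale the result themselves: */
    int64_t before = get_dirty_count_sectors() * SECTOR_SIZE;

    /* After the change, the scaling disappears from the call sites: */
    int64_t after = get_dirty_count_bytes();

    printf("before=%" PRId64 " after=%" PRId64 "\n", before, after);
    return 0;
}

The diff below applies exactly this simplification to the trace calls and the progress arithmetic in block/mirror.c.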
Diffstat (limited to 'block/mirror.c')
-rw-r--r--  block/mirror.c  16
1 file changed, 6 insertions, 10 deletions
diff --git a/block/mirror.c b/block/mirror.c
index de0a02778c..c5212d2c8d 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -340,8 +340,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
     if (offset < 0) {
         bdrv_set_dirty_iter(s->dbi, 0);
         offset = bdrv_dirty_iter_next(s->dbi);
-        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap) *
-                                  BDRV_SECTOR_SIZE);
+        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
         assert(offset >= 0);
     }
     bdrv_dirty_bitmap_unlock(s->dirty_bitmap);
@@ -811,11 +810,10 @@ static void coroutine_fn mirror_run(void *opaque)
         cnt = bdrv_get_dirty_count(s->dirty_bitmap);
         /* s->common.offset contains the number of bytes already processed so
-         * far, cnt is the number of dirty sectors remaining and
+         * far, cnt is the number of dirty bytes remaining and
          * s->bytes_in_flight is the number of bytes currently being
          * processed; together those are the current total operation length */
-        s->common.len = s->common.offset + s->bytes_in_flight +
-                        cnt * BDRV_SECTOR_SIZE;
+        s->common.len = s->common.offset + s->bytes_in_flight + cnt;
         /* Note that even when no rate limit is applied we need to yield
          * periodically with no pending I/O so that bdrv_drain_all() returns.
@@ -827,8 +825,7 @@ static void coroutine_fn mirror_run(void *opaque)
             s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
             if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                 (cnt == 0 && s->in_flight > 0)) {
-                trace_mirror_yield(s, cnt * BDRV_SECTOR_SIZE,
-                                   s->buf_free_count, s->in_flight);
+                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                 mirror_wait_for_io(s);
                 continue;
             } else if (cnt != 0) {
@@ -869,7 +866,7 @@ static void coroutine_fn mirror_run(void *opaque)
              * whether to switch to target check one last time if I/O has
              * come in the meanwhile, and if not flush the data to disk.
              */
-            trace_mirror_before_drain(s, cnt * BDRV_SECTOR_SIZE);
+            trace_mirror_before_drain(s, cnt);
             bdrv_drained_begin(bs);
             cnt = bdrv_get_dirty_count(s->dirty_bitmap);
@@ -888,8 +885,7 @@ static void coroutine_fn mirror_run(void *opaque)
         }
         ret = 0;
-        trace_mirror_before_sleep(s, cnt * BDRV_SECTOR_SIZE,
-                                  s->synced, delay_ns);
+        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
         if (!s->synced) {
             block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
             if (block_job_is_cancelled(&s->common)) {