author     Eric Blake <eblake@redhat.com>     2017-09-25 09:55:18 -0500
committer  Kevin Wolf <kwolf@redhat.com>      2017-10-06 16:28:58 +0200
commit     9a46dba7b76f5198555819905d1d8235947ba98f
tree       b61387e074642d58b367fec6a28eb2baaf6fe36e /block
parent     f798184cfdcb7f92a38c5f717d675bd75e1fd3ac
dirty-bitmap: Change bdrv_get_dirty_count() to report bytes
Thanks to recent cleanups, all callers were scaling a return value
of sectors into bytes; do the scaling internally instead.
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
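
The conversion being internalized here is QEMU's usual sector/byte scaling: a sector count becomes a byte count by shifting left by BDRV_SECTOR_BITS (9, i.e. multiplying by the 512-byte BDRV_SECTOR_SIZE). Below is a minimal standalone sketch of the before/after calling convention; it is not QEMU code, and stub_hbitmap_count() is a hypothetical stand-in for hbitmap_count(), which reports dirty sectors:

#include <assert.h>
#include <stdint.h>

#define BDRV_SECTOR_BITS 9                        /* QEMU's 512-byte sectors */
#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS)

/* Hypothetical stand-in for hbitmap_count(); pretend 8 sectors are dirty. */
static int64_t stub_hbitmap_count(void)
{
    return 8;
}

/* Old convention: the accessor reports sectors ... */
static int64_t get_dirty_count_sectors(void)
{
    return stub_hbitmap_count();
}

/* New convention: the accessor scales internally and reports bytes. */
static int64_t get_dirty_count_bytes(void)
{
    return stub_hbitmap_count() << BDRV_SECTOR_BITS;
}

int main(void)
{
    /* ... so every caller used to repeat the scaling at the call site. */
    int64_t old_way = get_dirty_count_sectors() * BDRV_SECTOR_SIZE;

    /* After the change, the returned value is already in bytes. */
    int64_t new_way = get_dirty_count_bytes();

    assert(old_way == new_way && new_way == 8 * 512);
    return 0;
}

Moving the scaling into bdrv_get_dirty_count() is what lets every call site in the diff below drop its own "* BDRV_SECTOR_SIZE" (or "<< BDRV_SECTOR_BITS") arithmetic.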
Diffstat (limited to 'block')
-rw-r--r--  block/dirty-bitmap.c |  4 ++--
-rw-r--r--  block/mirror.c       | 16 ++++++----------
2 files changed, 8 insertions(+), 12 deletions(-)
diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c
index e451916187..8322e23f0d 100644
--- a/block/dirty-bitmap.c
+++ b/block/dirty-bitmap.c
@@ -423,7 +423,7 @@ BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
     QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
         BlockDirtyInfo *info = g_new0(BlockDirtyInfo, 1);
         BlockDirtyInfoList *entry = g_new0(BlockDirtyInfoList, 1);
-        info->count = bdrv_get_dirty_count(bm) << BDRV_SECTOR_BITS;
+        info->count = bdrv_get_dirty_count(bm);
         info->granularity = bdrv_dirty_bitmap_granularity(bm);
         info->has_name = !!bm->name;
         info->name = g_strdup(bm->name);
@@ -652,7 +652,7 @@ void bdrv_set_dirty_iter(BdrvDirtyBitmapIter *iter, int64_t offset)
 
 int64_t bdrv_get_dirty_count(BdrvDirtyBitmap *bitmap)
 {
-    return hbitmap_count(bitmap->bitmap);
+    return hbitmap_count(bitmap->bitmap) << BDRV_SECTOR_BITS;
 }
 
 int64_t bdrv_get_meta_dirty_count(BdrvDirtyBitmap *bitmap)
diff --git a/block/mirror.c b/block/mirror.c
index de0a02778c..c5212d2c8d 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -340,8 +340,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
     if (offset < 0) {
         bdrv_set_dirty_iter(s->dbi, 0);
         offset = bdrv_dirty_iter_next(s->dbi);
-        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap) *
-                                  BDRV_SECTOR_SIZE);
+        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
         assert(offset >= 0);
     }
     bdrv_dirty_bitmap_unlock(s->dirty_bitmap);
@@ -811,11 +810,10 @@ static void coroutine_fn mirror_run(void *opaque)
 
         cnt = bdrv_get_dirty_count(s->dirty_bitmap);
         /* s->common.offset contains the number of bytes already processed so
-         * far, cnt is the number of dirty sectors remaining and
+         * far, cnt is the number of dirty bytes remaining and
          * s->bytes_in_flight is the number of bytes currently being
          * processed; together those are the current total operation length */
-        s->common.len = s->common.offset + s->bytes_in_flight +
-                        cnt * BDRV_SECTOR_SIZE;
+        s->common.len = s->common.offset + s->bytes_in_flight + cnt;
 
         /* Note that even when no rate limit is applied we need to yield
          * periodically with no pending I/O so that bdrv_drain_all() returns.
@@ -827,8 +825,7 @@ static void coroutine_fn mirror_run(void *opaque)
             s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
             if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                 (cnt == 0 && s->in_flight > 0)) {
-                trace_mirror_yield(s, cnt * BDRV_SECTOR_SIZE,
-                                   s->buf_free_count, s->in_flight);
+                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                 mirror_wait_for_io(s);
                 continue;
             } else if (cnt != 0) {
@@ -869,7 +866,7 @@ static void coroutine_fn mirror_run(void *opaque)
              * whether to switch to target check one last time if I/O has
              * come in the meanwhile, and if not flush the data to disk.
              */
-            trace_mirror_before_drain(s, cnt * BDRV_SECTOR_SIZE);
+            trace_mirror_before_drain(s, cnt);
 
             bdrv_drained_begin(bs);
             cnt = bdrv_get_dirty_count(s->dirty_bitmap);
@@ -888,8 +885,7 @@ static void coroutine_fn mirror_run(void *opaque)
         }
 
         ret = 0;
-        trace_mirror_before_sleep(s, cnt * BDRV_SECTOR_SIZE,
-                                  s->synced, delay_ns);
+        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
         if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {