From 56207df55ea546f3e72578a5920e68a346440b1a Mon Sep 17 00:00:00 2001
From: Vladimir Sementsov-Ogievskiy
Date: Thu, 12 Oct 2017 16:53:09 +0300
Subject: hbitmap: add next_zero function

The function searches for the next zero bit.
Also add an interface for BdrvDirtyBitmap and a unit test.

Signed-off-by: Vladimir Sementsov-Ogievskiy
Reviewed-by: John Snow
Message-id: 20171012135313.227864-2-vsementsov@virtuozzo.com
Signed-off-by: Jeff Cody
---
 block/dirty-bitmap.c         |  5 ++++
 include/block/dirty-bitmap.h |  1 +
 include/qemu/hbitmap.h       |  8 ++++++
 tests/test-hbitmap.c         | 61 ++++++++++++++++++++++++++++++++++++++++++++
 util/hbitmap.c               | 39 ++++++++++++++++++++++++++++
 5 files changed, 114 insertions(+)

diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c
index bd04e991b1..7879d13ddb 100644
--- a/block/dirty-bitmap.c
+++ b/block/dirty-bitmap.c
@@ -715,3 +715,8 @@ char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp)
 {
     return hbitmap_sha256(bitmap->bitmap, errp);
 }
+
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset)
+{
+    return hbitmap_next_zero(bitmap->bitmap, offset);
+}
diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h
index 3579a7597c..a591c27213 100644
--- a/include/block/dirty-bitmap.h
+++ b/include/block/dirty-bitmap.h
@@ -91,5 +91,6 @@ bool bdrv_has_changed_persistent_bitmaps(BlockDriverState *bs);
 BdrvDirtyBitmap *bdrv_dirty_bitmap_next(BlockDriverState *bs,
                                         BdrvDirtyBitmap *bitmap);
 char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp);
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t start);
 
 #endif
diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
index 81e78043d1..6b6490ecad 100644
--- a/include/qemu/hbitmap.h
+++ b/include/qemu/hbitmap.h
@@ -292,6 +292,14 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first);
  */
 unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
 
+/* hbitmap_next_zero:
+ * @hb: The HBitmap to operate on
+ * @start: The bit to start from.
+ *
+ * Find next not dirty bit.
+ */
+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start);
+
 /* hbitmap_create_meta:
  * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
  * The caller owns the created bitmap and must call hbitmap_free_meta(hb) to
diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c
index af41642346..9091c639b3 100644
--- a/tests/test-hbitmap.c
+++ b/tests/test-hbitmap.c
@@ -925,6 +925,61 @@ static void test_hbitmap_iter_and_reset(TestHBitmapData *data,
     hbitmap_iter_next(&hbi);
 }
 
+static void test_hbitmap_next_zero_check(TestHBitmapData *data, int64_t start)
+{
+    int64_t ret1 = hbitmap_next_zero(data->hb, start);
+    int64_t ret2 = start;
+    for ( ; ret2 < data->size && hbitmap_get(data->hb, ret2); ret2++) {
+        ;
+    }
+    if (ret2 == data->size) {
+        ret2 = -1;
+    }
+
+    g_assert_cmpint(ret1, ==, ret2);
+}
+
+static void test_hbitmap_next_zero_do(TestHBitmapData *data, int granularity)
+{
+    hbitmap_test_init(data, L3, granularity);
+    test_hbitmap_next_zero_check(data, 0);
+    test_hbitmap_next_zero_check(data, L3 - 1);
+
+    hbitmap_set(data->hb, L2, 1);
+    test_hbitmap_next_zero_check(data, 0);
+    test_hbitmap_next_zero_check(data, L2 - 1);
+    test_hbitmap_next_zero_check(data, L2);
+    test_hbitmap_next_zero_check(data, L2 + 1);
+
+    hbitmap_set(data->hb, L2 + 5, L1);
+    test_hbitmap_next_zero_check(data, 0);
+    test_hbitmap_next_zero_check(data, L2 + 1);
+    test_hbitmap_next_zero_check(data, L2 + 2);
+    test_hbitmap_next_zero_check(data, L2 + 5);
+    test_hbitmap_next_zero_check(data, L2 + L1 - 1);
+    test_hbitmap_next_zero_check(data, L2 + L1);
+
+    hbitmap_set(data->hb, L2 * 2, L3 - L2 * 2);
+    test_hbitmap_next_zero_check(data, L2 * 2 - L1);
+    test_hbitmap_next_zero_check(data, L2 * 2 - 2);
+    test_hbitmap_next_zero_check(data, L2 * 2 - 1);
+    test_hbitmap_next_zero_check(data, L2 * 2);
+    test_hbitmap_next_zero_check(data, L3 - 1);
+
+    hbitmap_set(data->hb, 0, L3);
+    test_hbitmap_next_zero_check(data, 0);
+}
+
+static void test_hbitmap_next_zero_0(TestHBitmapData *data, const void *unused)
+{
+    test_hbitmap_next_zero_do(data, 0);
+}
+
+static void test_hbitmap_next_zero_4(TestHBitmapData *data, const void *unused)
+{
+    test_hbitmap_next_zero_do(data, 4);
+}
+
 int main(int argc, char **argv)
 {
     g_test_init(&argc, &argv, NULL);
@@ -985,6 +1040,12 @@ int main(int argc, char **argv)
 
     hbitmap_test_add("/hbitmap/iter/iter_and_reset",
                      test_hbitmap_iter_and_reset);
+
+    hbitmap_test_add("/hbitmap/next_zero/next_zero_0",
+                     test_hbitmap_next_zero_0);
+    hbitmap_test_add("/hbitmap/next_zero/next_zero_4",
+                     test_hbitmap_next_zero_4);
+
     g_test_run();
 
     return 0;
diff --git a/util/hbitmap.c b/util/hbitmap.c
index 2f9d0fdbd0..289778a55c 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -188,6 +188,45 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
     }
 }
 
+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
+{
+    size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
+    unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
+    uint64_t sz = hb->sizes[HBITMAP_LEVELS - 1];
+    unsigned long cur = last_lev[pos];
+    unsigned start_bit_offset =
+        (start >> hb->granularity) & (BITS_PER_LONG - 1);
+    int64_t res;
+
+    cur |= (1UL << start_bit_offset) - 1;
+    assert((start >> hb->granularity) < hb->size);
+
+    if (cur == (unsigned long)-1) {
+        do {
+            pos++;
+        } while (pos < sz && last_lev[pos] == (unsigned long)-1);
+
+        if (pos >= sz) {
+            return -1;
+        }
+
+        cur = last_lev[pos];
+    }
+
+    res = (pos << BITS_PER_LEVEL) + ctol(cur);
+    if (res >= hb->size) {
+        return -1;
+    }
+
+    res = res << hb->granularity;
+    if (res < start) {
+        assert(((start - res) >> hb->granularity) == 0);
+        return start;
+    }
+
+    return res;
+}
+
 bool hbitmap_empty(const HBitmap *hb)
 {
return hb->count == 0; -- cgit v1.2.3 From a193b0f0a8d7735f4eb2ff863fd0902a5fa5eec6 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Thu, 12 Oct 2017 16:53:10 +0300 Subject: backup: move from done_bitmap to copy_bitmap Use HBitmap copy_bitmap instead of done_bitmap. This is needed to improve incremental backup in following patches and to unify backup loop for full/incremental modes in future patches. Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Jeff Cody Reviewed-by: Stefan Hajnoczi Reviewed-by: John Snow Message-id: 20171012135313.227864-3-vsementsov@virtuozzo.com Signed-off-by: Jeff Cody --- block/backup.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/block/backup.c b/block/backup.c index 99e6bcc748..5175808093 100644 --- a/block/backup.c +++ b/block/backup.c @@ -40,11 +40,12 @@ typedef struct BackupBlockJob { BlockdevOnError on_target_error; CoRwlock flush_rwlock; uint64_t bytes_read; - unsigned long *done_bitmap; int64_t cluster_size; bool compress; NotifierWithReturn before_write; QLIST_HEAD(, CowRequest) inflight_reqs; + + HBitmap *copy_bitmap; } BackupBlockJob; /* See if in-flight requests overlap and wait for them to complete */ @@ -109,10 +110,11 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job, cow_request_begin(&cow_request, job, start, end); for (; start < end; start += job->cluster_size) { - if (test_bit(start / job->cluster_size, job->done_bitmap)) { + if (!hbitmap_get(job->copy_bitmap, start / job->cluster_size)) { trace_backup_do_cow_skip(job, start); continue; /* already copied */ } + hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1); trace_backup_do_cow_process(job, start); @@ -132,6 +134,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job, if (error_is_read) { *error_is_read = true; } + hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1); goto out; } @@ -148,11 +151,10 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job, if (error_is_read) { *error_is_read = false; } + hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1); goto out; } - set_bit(start / job->cluster_size, job->done_bitmap); - /* Publish progress, guest I/O counts as progress too. Note that the * offset field is an opaque progress value, it is not a disk offset. */ @@ -260,7 +262,7 @@ void backup_do_checkpoint(BlockJob *job, Error **errp) } len = DIV_ROUND_UP(backup_job->common.len, backup_job->cluster_size); - bitmap_zero(backup_job->done_bitmap, len); + hbitmap_set(backup_job->copy_bitmap, 0, len); } void backup_wait_for_overlapping_requests(BlockJob *job, int64_t offset, @@ -425,19 +427,22 @@ static void coroutine_fn backup_run(void *opaque) BackupBlockJob *job = opaque; BackupCompleteData *data; BlockDriverState *bs = blk_bs(job->common.blk); - int64_t offset; + int64_t offset, nb_clusters; int ret = 0; QLIST_INIT(&job->inflight_reqs); qemu_co_rwlock_init(&job->flush_rwlock); - job->done_bitmap = bitmap_new(DIV_ROUND_UP(job->common.len, - job->cluster_size)); + nb_clusters = DIV_ROUND_UP(job->common.len, job->cluster_size); + job->copy_bitmap = hbitmap_alloc(nb_clusters, 0); + hbitmap_set(job->copy_bitmap, 0, nb_clusters); job->before_write.notify = backup_before_write_notify; bdrv_add_before_write_notifier(bs, &job->before_write); if (job->sync_mode == MIRROR_SYNC_MODE_NONE) { + /* All bits are set in copy_bitmap to allow any cluster to be copied. + * This does not actually require them to be copied. 
*/ while (!block_job_is_cancelled(&job->common)) { /* Yield until the job is cancelled. We just let our before_write * notify callback service CoW requests. */ @@ -512,7 +517,7 @@ static void coroutine_fn backup_run(void *opaque) /* wait until pending backup_do_cow() calls have completed */ qemu_co_rwlock_wrlock(&job->flush_rwlock); qemu_co_rwlock_unlock(&job->flush_rwlock); - g_free(job->done_bitmap); + hbitmap_free(job->copy_bitmap); data = g_malloc(sizeof(*data)); data->ret = ret; -- cgit v1.2.3 From 8cc6dc6215fa2a278fd853a2caca172e43c6263e Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Thu, 12 Oct 2017 16:53:11 +0300 Subject: backup: init copy_bitmap from sync_bitmap for incremental We should not copy non-dirty clusters in write notifiers. So, initialize copy_bitmap from sync_bitmap. Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: John Snow Reviewed-by: Jeff Cody Reviewed-by: Stefan Hajnoczi Message-id: 20171012135313.227864-4-vsementsov@virtuozzo.com Signed-off-by: Jeff Cody --- block/backup.c | 44 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/block/backup.c b/block/backup.c index 5175808093..b8901ea662 100644 --- a/block/backup.c +++ b/block/backup.c @@ -422,6 +422,43 @@ out: return ret; } +/* init copy_bitmap from sync_bitmap */ +static void backup_incremental_init_copy_bitmap(BackupBlockJob *job) +{ + BdrvDirtyBitmapIter *dbi; + int64_t offset; + int64_t end = DIV_ROUND_UP(bdrv_dirty_bitmap_size(job->sync_bitmap), + job->cluster_size); + + dbi = bdrv_dirty_iter_new(job->sync_bitmap); + while ((offset = bdrv_dirty_iter_next(dbi)) != -1) { + int64_t cluster = offset / job->cluster_size; + int64_t next_cluster; + + offset += bdrv_dirty_bitmap_granularity(job->sync_bitmap); + if (offset >= bdrv_dirty_bitmap_size(job->sync_bitmap)) { + hbitmap_set(job->copy_bitmap, cluster, end - cluster); + break; + } + + offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset); + if (offset == -1) { + hbitmap_set(job->copy_bitmap, cluster, end - cluster); + break; + } + + next_cluster = DIV_ROUND_UP(offset, job->cluster_size); + hbitmap_set(job->copy_bitmap, cluster, next_cluster - cluster); + if (next_cluster >= end) { + break; + } + + bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size); + } + + bdrv_dirty_iter_free(dbi); +} + static void coroutine_fn backup_run(void *opaque) { BackupBlockJob *job = opaque; @@ -435,7 +472,12 @@ static void coroutine_fn backup_run(void *opaque) nb_clusters = DIV_ROUND_UP(job->common.len, job->cluster_size); job->copy_bitmap = hbitmap_alloc(nb_clusters, 0); - hbitmap_set(job->copy_bitmap, 0, nb_clusters); + if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) { + backup_incremental_init_copy_bitmap(job); + } else { + hbitmap_set(job->copy_bitmap, 0, nb_clusters); + } + job->before_write.notify = backup_before_write_notify; bdrv_add_before_write_notifier(bs, &job->before_write); -- cgit v1.2.3 From 085bd08e6f32f0d96885ff8e0fa2896c2fabed50 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Thu, 12 Oct 2017 16:53:12 +0300 Subject: backup: simplify non-dirty bits progress processing Set fake progress for non-dirty clusters in copy_bitmap initialization, to. It simplifies code and allows further refactoring. This patch changes user's view of backup progress, but formally it doesn't changed: progress hops are just moved to the beginning. Actually it's just a point of view: when do we actually skip clusters? 
We can say in the very beginning, that we skip these clusters and do not think about them later. Of course, if go through disk sequentially, it's logical to say, that we skip clusters between copied portions to the left and to the right of them. But even now copying progress is not sequential because of write notifiers. Future patches will introduce new backup architecture which will do copying in several coroutines in parallel, so it will make no sense to publish fake progress by parts in parallel with other copying requests. Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: John Snow Reviewed-by: Stefan Hajnoczi Reviewed-by: Jeff Cody Message-id: 20171012135313.227864-5-vsementsov@virtuozzo.com Signed-off-by: Jeff Cody --- block/backup.c | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/block/backup.c b/block/backup.c index b8901ea662..8ee220076b 100644 --- a/block/backup.c +++ b/block/backup.c @@ -369,7 +369,6 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job) int64_t offset; int64_t cluster; int64_t end; - int64_t last_cluster = -1; BdrvDirtyBitmapIter *dbi; granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap); @@ -380,12 +379,6 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job) while ((offset = bdrv_dirty_iter_next(dbi)) >= 0) { cluster = offset / job->cluster_size; - /* Fake progress updates for any clusters we skipped */ - if (cluster != last_cluster + 1) { - job->common.offset += ((cluster - last_cluster - 1) * - job->cluster_size); - } - for (end = cluster + clusters_per_iter; cluster < end; cluster++) { do { if (yield_and_check(job)) { @@ -407,14 +400,6 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job) if (granularity < job->cluster_size) { bdrv_set_dirty_iter(dbi, cluster * job->cluster_size); } - - last_cluster = cluster - 1; - } - - /* Play some final catchup with the progress meter */ - end = DIV_ROUND_UP(job->common.len, job->cluster_size); - if (last_cluster + 1 < end) { - job->common.offset += ((end - last_cluster - 1) * job->cluster_size); } out: @@ -456,6 +441,9 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job) bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size); } + job->common.offset = job->common.len - + hbitmap_count(job->copy_bitmap) * job->cluster_size; + bdrv_dirty_iter_free(dbi); } -- cgit v1.2.3 From 53f1c8794f2c1aea4d2888a3ac4e1b3b8b8b9777 Mon Sep 17 00:00:00 2001 From: Vladimir Sementsov-Ogievskiy Date: Thu, 12 Oct 2017 16:53:13 +0300 Subject: backup: use copy_bitmap in incremental backup We can use copy_bitmap instead of sync_bitmap. copy_bitmap is initialized from sync_bitmap and it is more informative: we will not try to process data, that is already in progress (by write notifier). 
Signed-off-by: Vladimir Sementsov-Ogievskiy Reviewed-by: Stefan Hajnoczi Reviewed-by: John Snow Reviewed-by: Jeff Cody Message-id: 20171012135313.227864-6-vsementsov@virtuozzo.com Signed-off-by: Jeff Cody --- block/backup.c | 55 +++++++++++++++++-------------------------------------- 1 file changed, 17 insertions(+), 38 deletions(-) diff --git a/block/backup.c b/block/backup.c index 8ee220076b..4a16a37229 100644 --- a/block/backup.c +++ b/block/backup.c @@ -362,49 +362,28 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job) static int coroutine_fn backup_run_incremental(BackupBlockJob *job) { + int ret; bool error_is_read; - int ret = 0; - int clusters_per_iter; - uint32_t granularity; - int64_t offset; int64_t cluster; - int64_t end; - BdrvDirtyBitmapIter *dbi; - - granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap); - clusters_per_iter = MAX((granularity / job->cluster_size), 1); - dbi = bdrv_dirty_iter_new(job->sync_bitmap); - - /* Find the next dirty sector(s) */ - while ((offset = bdrv_dirty_iter_next(dbi)) >= 0) { - cluster = offset / job->cluster_size; - - for (end = cluster + clusters_per_iter; cluster < end; cluster++) { - do { - if (yield_and_check(job)) { - goto out; - } - ret = backup_do_cow(job, cluster * job->cluster_size, - job->cluster_size, &error_is_read, - false); - if ((ret < 0) && - backup_error_action(job, error_is_read, -ret) == - BLOCK_ERROR_ACTION_REPORT) { - goto out; - } - } while (ret < 0); - } + HBitmapIter hbi; - /* If the bitmap granularity is smaller than the backup granularity, - * we need to advance the iterator pointer to the next cluster. */ - if (granularity < job->cluster_size) { - bdrv_set_dirty_iter(dbi, cluster * job->cluster_size); - } + hbitmap_iter_init(&hbi, job->copy_bitmap, 0); + while ((cluster = hbitmap_iter_next(&hbi)) != -1) { + do { + if (yield_and_check(job)) { + return 0; + } + ret = backup_do_cow(job, cluster * job->cluster_size, + job->cluster_size, &error_is_read, false); + if (ret < 0 && backup_error_action(job, error_is_read, -ret) == + BLOCK_ERROR_ACTION_REPORT) + { + return ret; + } + } while (ret < 0); } -out: - bdrv_dirty_iter_free(dbi); - return ret; + return 0; } /* init copy_bitmap from sync_bitmap */ -- cgit v1.2.3 From aa9ef2e65bed6a8f1709e0523fc856ec08beb657 Mon Sep 17 00:00:00 2001 From: John Snow Date: Wed, 13 Dec 2017 15:46:11 -0500 Subject: blockjob: kick jobs on set-speed If users set an unreasonably low speed (like one byte per second), the calculated delay may exceed many hours. While we like to punish users for asking for stupid things, we do also like to allow users to correct their wicked ways. When a user provides a new speed, kick the job to allow it to recalculate its delay. 
Signed-off-by: John Snow Reviewed-by: Stefan Hajnoczi Message-id: 20171213204611.26276-1-jsnow@redhat.com Signed-off-by: Jeff Cody --- blockjob.c | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/blockjob.c b/blockjob.c index 715c2c2680..6173e4728c 100644 --- a/blockjob.c +++ b/blockjob.c @@ -59,6 +59,7 @@ static void __attribute__((__constructor__)) block_job_init(void) static void block_job_event_cancelled(BlockJob *job); static void block_job_event_completed(BlockJob *job, const char *msg); +static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job)); /* Transactional group of block jobs */ struct BlockJobTxn { @@ -480,9 +481,16 @@ static void block_job_completed_txn_success(BlockJob *job) } } +/* Assumes the block_job_mutex is held */ +static bool block_job_timer_pending(BlockJob *job) +{ + return timer_pending(&job->sleep_timer); +} + void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) { Error *local_err = NULL; + int64_t old_speed = job->speed; if (!job->driver->set_speed) { error_setg(errp, QERR_UNSUPPORTED); @@ -495,6 +503,12 @@ void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) } job->speed = speed; + if (speed <= old_speed) { + return; + } + + /* kick only if a timer is pending */ + block_job_enter_cond(job, block_job_timer_pending); } void block_job_complete(BlockJob *job, Error **errp) @@ -821,7 +835,11 @@ void block_job_resume_all(void) } } -void block_job_enter(BlockJob *job) +/* + * Conditionally enter a block_job pending a call to fn() while + * under the block_job_lock critical section. + */ +static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job)) { if (!block_job_started(job)) { return; @@ -836,6 +854,11 @@ void block_job_enter(BlockJob *job) return; } + if (fn && !fn(job)) { + block_job_unlock(); + return; + } + assert(!job->deferred_to_main_loop); timer_del(&job->sleep_timer); job->busy = true; @@ -843,6 +866,11 @@ void block_job_enter(BlockJob *job) aio_co_wake(job->co); } +void block_job_enter(BlockJob *job) +{ + block_job_enter_cond(job, NULL); +} + bool block_job_is_cancelled(BlockJob *job) { return job->cancelled; -- cgit v1.2.3 From ac90dad94b5b1eda18a9a86c739c249d851cd35c Mon Sep 17 00:00:00 2001 From: Jeff Cody Date: Tue, 7 Nov 2017 17:27:20 -0500 Subject: block/sheepdog: remove spurious NULL check 'tag' is already checked in the lines immediately preceding this check, and set to non-NULL if NULL. No need to check again, it hasn't changed. Signed-off-by: Jeff Cody Reviewed-by: Eric Blake Reviewed-by: Darren Kenny Signed-off-by: Jeff Cody --- block/sheepdog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/sheepdog.c b/block/sheepdog.c index 696a71442a..459d93a35f 100644 --- a/block/sheepdog.c +++ b/block/sheepdog.c @@ -1632,7 +1632,7 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags, if (!tag) { tag = ""; } - if (tag && strlen(tag) >= SD_MAX_VDI_TAG_LEN) { + if (strlen(tag) >= SD_MAX_VDI_TAG_LEN) { error_setg(errp, "value of parameter 'tag' is too long"); ret = -EINVAL; goto err_no_fd; -- cgit v1.2.3 From d507c5f682d23a60a356a081557ceb34ea0d2669 Mon Sep 17 00:00:00 2001 From: Jeff Cody Date: Tue, 7 Nov 2017 17:27:21 -0500 Subject: block/sheepdog: code beautification No functional changes, just whitespace manipulation. 
Signed-off-by: Jeff Cody Reviewed-by: Eric Blake Reviewed-by: Darren Kenny Signed-off-by: Jeff Cody --- block/sheepdog.c | 164 +++++++++++++++++++++++++++---------------------------- 1 file changed, 82 insertions(+), 82 deletions(-) diff --git a/block/sheepdog.c b/block/sheepdog.c index 459d93a35f..488bad333b 100644 --- a/block/sheepdog.c +++ b/block/sheepdog.c @@ -400,7 +400,7 @@ typedef struct BDRVSheepdogReopenState { int cache_flags; } BDRVSheepdogReopenState; -static const char * sd_strerror(int err) +static const char *sd_strerror(int err) { int i; @@ -3078,111 +3078,111 @@ static QemuOptsList sd_create_opts = { }; static BlockDriver bdrv_sheepdog = { - .format_name = "sheepdog", - .protocol_name = "sheepdog", - .instance_size = sizeof(BDRVSheepdogState), - .bdrv_parse_filename = sd_parse_filename, - .bdrv_file_open = sd_open, - .bdrv_reopen_prepare = sd_reopen_prepare, - .bdrv_reopen_commit = sd_reopen_commit, - .bdrv_reopen_abort = sd_reopen_abort, - .bdrv_close = sd_close, - .bdrv_create = sd_create, - .bdrv_has_zero_init = bdrv_has_zero_init_1, - .bdrv_getlength = sd_getlength, + .format_name = "sheepdog", + .protocol_name = "sheepdog", + .instance_size = sizeof(BDRVSheepdogState), + .bdrv_parse_filename = sd_parse_filename, + .bdrv_file_open = sd_open, + .bdrv_reopen_prepare = sd_reopen_prepare, + .bdrv_reopen_commit = sd_reopen_commit, + .bdrv_reopen_abort = sd_reopen_abort, + .bdrv_close = sd_close, + .bdrv_create = sd_create, + .bdrv_has_zero_init = bdrv_has_zero_init_1, + .bdrv_getlength = sd_getlength, .bdrv_get_allocated_file_size = sd_get_allocated_file_size, - .bdrv_truncate = sd_truncate, + .bdrv_truncate = sd_truncate, - .bdrv_co_readv = sd_co_readv, - .bdrv_co_writev = sd_co_writev, - .bdrv_co_flush_to_disk = sd_co_flush_to_disk, - .bdrv_co_pdiscard = sd_co_pdiscard, - .bdrv_co_get_block_status = sd_co_get_block_status, + .bdrv_co_readv = sd_co_readv, + .bdrv_co_writev = sd_co_writev, + .bdrv_co_flush_to_disk = sd_co_flush_to_disk, + .bdrv_co_pdiscard = sd_co_pdiscard, + .bdrv_co_get_block_status = sd_co_get_block_status, - .bdrv_snapshot_create = sd_snapshot_create, - .bdrv_snapshot_goto = sd_snapshot_goto, - .bdrv_snapshot_delete = sd_snapshot_delete, - .bdrv_snapshot_list = sd_snapshot_list, + .bdrv_snapshot_create = sd_snapshot_create, + .bdrv_snapshot_goto = sd_snapshot_goto, + .bdrv_snapshot_delete = sd_snapshot_delete, + .bdrv_snapshot_list = sd_snapshot_list, - .bdrv_save_vmstate = sd_save_vmstate, - .bdrv_load_vmstate = sd_load_vmstate, + .bdrv_save_vmstate = sd_save_vmstate, + .bdrv_load_vmstate = sd_load_vmstate, - .bdrv_detach_aio_context = sd_detach_aio_context, - .bdrv_attach_aio_context = sd_attach_aio_context, + .bdrv_detach_aio_context = sd_detach_aio_context, + .bdrv_attach_aio_context = sd_attach_aio_context, - .create_opts = &sd_create_opts, + .create_opts = &sd_create_opts, }; static BlockDriver bdrv_sheepdog_tcp = { - .format_name = "sheepdog", - .protocol_name = "sheepdog+tcp", - .instance_size = sizeof(BDRVSheepdogState), - .bdrv_parse_filename = sd_parse_filename, - .bdrv_file_open = sd_open, - .bdrv_reopen_prepare = sd_reopen_prepare, - .bdrv_reopen_commit = sd_reopen_commit, - .bdrv_reopen_abort = sd_reopen_abort, - .bdrv_close = sd_close, - .bdrv_create = sd_create, - .bdrv_has_zero_init = bdrv_has_zero_init_1, - .bdrv_getlength = sd_getlength, + .format_name = "sheepdog", + .protocol_name = "sheepdog+tcp", + .instance_size = sizeof(BDRVSheepdogState), + .bdrv_parse_filename = sd_parse_filename, + .bdrv_file_open = sd_open, + 
.bdrv_reopen_prepare = sd_reopen_prepare, + .bdrv_reopen_commit = sd_reopen_commit, + .bdrv_reopen_abort = sd_reopen_abort, + .bdrv_close = sd_close, + .bdrv_create = sd_create, + .bdrv_has_zero_init = bdrv_has_zero_init_1, + .bdrv_getlength = sd_getlength, .bdrv_get_allocated_file_size = sd_get_allocated_file_size, - .bdrv_truncate = sd_truncate, + .bdrv_truncate = sd_truncate, - .bdrv_co_readv = sd_co_readv, - .bdrv_co_writev = sd_co_writev, - .bdrv_co_flush_to_disk = sd_co_flush_to_disk, - .bdrv_co_pdiscard = sd_co_pdiscard, - .bdrv_co_get_block_status = sd_co_get_block_status, + .bdrv_co_readv = sd_co_readv, + .bdrv_co_writev = sd_co_writev, + .bdrv_co_flush_to_disk = sd_co_flush_to_disk, + .bdrv_co_pdiscard = sd_co_pdiscard, + .bdrv_co_get_block_status = sd_co_get_block_status, - .bdrv_snapshot_create = sd_snapshot_create, - .bdrv_snapshot_goto = sd_snapshot_goto, - .bdrv_snapshot_delete = sd_snapshot_delete, - .bdrv_snapshot_list = sd_snapshot_list, + .bdrv_snapshot_create = sd_snapshot_create, + .bdrv_snapshot_goto = sd_snapshot_goto, + .bdrv_snapshot_delete = sd_snapshot_delete, + .bdrv_snapshot_list = sd_snapshot_list, - .bdrv_save_vmstate = sd_save_vmstate, - .bdrv_load_vmstate = sd_load_vmstate, + .bdrv_save_vmstate = sd_save_vmstate, + .bdrv_load_vmstate = sd_load_vmstate, - .bdrv_detach_aio_context = sd_detach_aio_context, - .bdrv_attach_aio_context = sd_attach_aio_context, + .bdrv_detach_aio_context = sd_detach_aio_context, + .bdrv_attach_aio_context = sd_attach_aio_context, - .create_opts = &sd_create_opts, + .create_opts = &sd_create_opts, }; static BlockDriver bdrv_sheepdog_unix = { - .format_name = "sheepdog", - .protocol_name = "sheepdog+unix", - .instance_size = sizeof(BDRVSheepdogState), - .bdrv_parse_filename = sd_parse_filename, - .bdrv_file_open = sd_open, - .bdrv_reopen_prepare = sd_reopen_prepare, - .bdrv_reopen_commit = sd_reopen_commit, - .bdrv_reopen_abort = sd_reopen_abort, - .bdrv_close = sd_close, - .bdrv_create = sd_create, - .bdrv_has_zero_init = bdrv_has_zero_init_1, - .bdrv_getlength = sd_getlength, + .format_name = "sheepdog", + .protocol_name = "sheepdog+unix", + .instance_size = sizeof(BDRVSheepdogState), + .bdrv_parse_filename = sd_parse_filename, + .bdrv_file_open = sd_open, + .bdrv_reopen_prepare = sd_reopen_prepare, + .bdrv_reopen_commit = sd_reopen_commit, + .bdrv_reopen_abort = sd_reopen_abort, + .bdrv_close = sd_close, + .bdrv_create = sd_create, + .bdrv_has_zero_init = bdrv_has_zero_init_1, + .bdrv_getlength = sd_getlength, .bdrv_get_allocated_file_size = sd_get_allocated_file_size, - .bdrv_truncate = sd_truncate, + .bdrv_truncate = sd_truncate, - .bdrv_co_readv = sd_co_readv, - .bdrv_co_writev = sd_co_writev, - .bdrv_co_flush_to_disk = sd_co_flush_to_disk, - .bdrv_co_pdiscard = sd_co_pdiscard, - .bdrv_co_get_block_status = sd_co_get_block_status, + .bdrv_co_readv = sd_co_readv, + .bdrv_co_writev = sd_co_writev, + .bdrv_co_flush_to_disk = sd_co_flush_to_disk, + .bdrv_co_pdiscard = sd_co_pdiscard, + .bdrv_co_get_block_status = sd_co_get_block_status, - .bdrv_snapshot_create = sd_snapshot_create, - .bdrv_snapshot_goto = sd_snapshot_goto, - .bdrv_snapshot_delete = sd_snapshot_delete, - .bdrv_snapshot_list = sd_snapshot_list, + .bdrv_snapshot_create = sd_snapshot_create, + .bdrv_snapshot_goto = sd_snapshot_goto, + .bdrv_snapshot_delete = sd_snapshot_delete, + .bdrv_snapshot_list = sd_snapshot_list, - .bdrv_save_vmstate = sd_save_vmstate, - .bdrv_load_vmstate = sd_load_vmstate, + .bdrv_save_vmstate = sd_save_vmstate, + .bdrv_load_vmstate = 
sd_load_vmstate, - .bdrv_detach_aio_context = sd_detach_aio_context, - .bdrv_attach_aio_context = sd_attach_aio_context, + .bdrv_detach_aio_context = sd_detach_aio_context, + .bdrv_attach_aio_context = sd_attach_aio_context, - .create_opts = &sd_create_opts, + .create_opts = &sd_create_opts, }; static void bdrv_sheepdog_init(void) -- cgit v1.2.3 From 2d25964d1831c99d54981e8b615eba5dd6a63e36 Mon Sep 17 00:00:00 2001 From: Jeff Cody Date: Tue, 7 Nov 2017 17:27:22 -0500 Subject: block/curl: check error return of curl_global_init() If curl_global_init() fails, per the documentation no other curl functions may be called, so make sure to check the return value. Also, some minor changes to the initialization latch variable 'inited': - Make it static in the file, for clarity - Change the name for clarity - Make it a bool Signed-off-by: Jeff Cody Reviewed-by: Eric Blake Reviewed-by: Richard W.M. Jones Reviewed-by: Darren Kenny Signed-off-by: Jeff Cody --- block/curl.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/block/curl.c b/block/curl.c index 2a244e2439..00a98799b6 100644 --- a/block/curl.c +++ b/block/curl.c @@ -89,6 +89,8 @@ static CURLMcode __curl_multi_socket_action(CURLM *multi_handle, struct BDRVCURLState; +static bool libcurl_initialized; + typedef struct CURLAIOCB { Coroutine *co; QEMUIOVector *qiov; @@ -686,14 +688,23 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags, double d; const char *secretid; const char *protocol_delimiter; + int ret; - static int inited = 0; if (flags & BDRV_O_RDWR) { error_setg(errp, "curl block device does not support writes"); return -EROFS; } + if (!libcurl_initialized) { + ret = curl_global_init(CURL_GLOBAL_ALL); + if (ret) { + error_setg(errp, "libcurl initialization failed with %d", ret); + return -EIO; + } + libcurl_initialized = true; + } + qemu_mutex_init(&s->mutex); opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, options, &local_err); @@ -772,11 +783,6 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags, } } - if (!inited) { - curl_global_init(CURL_GLOBAL_ALL); - inited = 1; - } - DPRINTF("CURL: Opening %s\n", file); QSIMPLEQ_INIT(&s->free_state_waitq); s->aio_context = bdrv_get_aio_context(bs); -- cgit v1.2.3 From 996922de45299878cdc4c15b72b19edf2bc618a4 Mon Sep 17 00:00:00 2001 From: Jeff Cody Date: Tue, 7 Nov 2017 17:27:23 -0500 Subject: block/curl: fix minor memory leaks Signed-off-by: Jeff Cody Reviewed-by: Richard W.M. Jones Signed-off-by: Jeff Cody --- block/curl.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/block/curl.c b/block/curl.c index 00a98799b6..35cf417f59 100644 --- a/block/curl.c +++ b/block/curl.c @@ -857,6 +857,9 @@ out_noclean: qemu_mutex_destroy(&s->mutex); g_free(s->cookie); g_free(s->url); + g_free(s->username); + g_free(s->proxyusername); + g_free(s->proxypassword); qemu_opts_del(opts); return -EINVAL; } @@ -955,6 +958,9 @@ static void curl_close(BlockDriverState *bs) g_free(s->cookie); g_free(s->url); + g_free(s->username); + g_free(s->proxyusername); + g_free(s->proxypassword); } static int64_t curl_getlength(BlockDriverState *bs) -- cgit v1.2.3
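
The central technique of the bitmap patches in this series is the loop in backup_incremental_init_copy_bitmap(): it walks the sync bitmap by alternating a find-next-dirty search (bdrv_dirty_iter_next) with the new find-next-zero search (bdrv_dirty_bitmap_next_zero), and rounds each dirty extent out to cluster boundaries in copy_bitmap. The following standalone sketch is an editor's illustrative model of that loop, not code from the patches: the bool arrays, SIZE, CLUSTER_SIZE, and the next_dirty()/next_zero() helpers are simplified stand-ins for HBitmap and the dirty-bitmap API, and byte/granularity handling is deliberately reduced to one unit per bit.

/* Standalone model (not QEMU code) of converting a fine-grained dirty bitmap
 * into a cluster-granularity copy bitmap by alternating "next dirty bit" and
 * "next zero bit" searches, as backup_incremental_init_copy_bitmap() does. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SIZE         64   /* toy dirty-bitmap length, one bit per unit */
#define CLUSTER_SIZE 8    /* backup cluster size in the same units */

/* Stand-in for bdrv_dirty_iter_next(): first set bit at or after start. */
static int64_t next_dirty(const bool *bits, int64_t start)
{
    for (int64_t i = start; i < SIZE; i++) {
        if (bits[i]) {
            return i;
        }
    }
    return -1;
}

/* Stand-in for bdrv_dirty_bitmap_next_zero(): first clear bit at or after start. */
static int64_t next_zero(const bool *bits, int64_t start)
{
    for (int64_t i = start; i < SIZE; i++) {
        if (!bits[i]) {
            return i;
        }
    }
    return -1;
}

int main(void)
{
    bool dirty[SIZE] = { false };
    bool copy[SIZE / CLUSTER_SIZE] = { false };
    int64_t end = (SIZE + CLUSTER_SIZE - 1) / CLUSTER_SIZE;
    int64_t offset = 0;

    /* A few dirty extents to convert into cluster-granularity copy ranges. */
    dirty[3] = dirty[20] = dirty[21] = dirty[22] = dirty[60] = true;

    while ((offset = next_dirty(dirty, offset)) != -1) {
        int64_t cluster = offset / CLUSTER_SIZE;
        int64_t next_cluster;

        offset = next_zero(dirty, offset);
        if (offset == -1) {
            /* Dirty through the end: mark every remaining cluster. */
            for (int64_t c = cluster; c < end; c++) {
                copy[c] = true;
            }
            break;
        }

        /* Round the end of the dirty extent up to a cluster boundary. */
        next_cluster = (offset + CLUSTER_SIZE - 1) / CLUSTER_SIZE;
        for (int64_t c = cluster; c < next_cluster; c++) {
            copy[c] = true;
        }
        if (next_cluster >= end) {
            break;
        }
        offset = next_cluster * CLUSTER_SIZE;
    }

    for (int64_t c = 0; c < end; c++) {
        printf("cluster %2" PRId64 ": %s\n", c, copy[c] ? "copy" : "skip");
    }
    return 0;
}

Running the sketch marks only clusters 0, 2 and 7 for copying in this example, mirroring how only cluster-aligned ranges that cover dirty bits end up set in copy_bitmap while fully clean clusters are skipped up front.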