author     John Snow <jsnow@redhat.com>    2016-11-08 01:50:35 -0500
committer  Jeff Cody <jcody@redhat.com>    2016-11-14 22:47:34 -0500
commit     e8a40bf71d606f9f87866fb2461eaed52814b38e (patch)
tree       4474066f2b85abd51936afb6e2bde28d3866a8ee /block/backup.c
parent     1e93b9fba2e0e23105e91574fcfc193f899d8e74 (diff)
blockjob: add .clean property
Cleaning up after we have deferred to the main thread, but before the
transaction has converged, can be dangerous and can result in deadlocks
if the job cleanup invokes any BH polling loops.

A job may begin cleaning up and, in doing so, induce another job to enter
its own cleanup routine. The second job, part of the same transaction,
will block waiting for the first job to finish, so neither job can make
progress.

To rectify this, allow jobs to register a cleanup operation that always
runs, regardless of whether the job was part of a transaction and
regardless of whether the transaction job group completed successfully.

Move the sensitive cleanup to this callback instead; it is guaranteed to
run only after the transaction has converged, which removes the delicate
timing constraints from said cleanup.

Furthermore, in future patches these cleanup operations will be performed
regardless of whether or not we actually started the job. Therefore,
cleanup callbacks should essentially confine themselves to undoing
creation operations, e.g. the setup actions taken in what is now
backup_start.

Reported-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-id: 1478587839-9834-3-git-send-email-jsnow@redhat.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
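The generic blockjob.c side of this series is not part of the diff below.
Purely as an illustrative sketch, a completion path that honors the new
callback could look roughly like this (the function name and surrounding
logic are assumptions for illustration, not taken from this patch):

/* Rough sketch (not from this patch): after either .commit (success) or
 * .abort (failure) has run for a job in a converged transaction, .clean
 * runs unconditionally, so target cleanup no longer depends on
 * transaction timing. */
static void block_job_completed_single(BlockJob *job)
{
    if (!job->ret) {
        if (job->driver->commit) {
            job->driver->commit(job);   /* success path */
        }
    } else {
        if (job->driver->abort) {
            job->driver->abort(job);    /* failure path */
        }
    }
    if (job->driver->clean) {
        job->driver->clean(job);        /* always runs, transaction or not */
    }
    /* ... complete the job and drop its references ... */
}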
Diffstat (limited to 'block/backup.c')
-rw-r--r--  block/backup.c  15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index 7b5d8a3757..734a24c698 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -242,6 +242,14 @@ static void backup_abort(BlockJob *job)
     }
 }
 
+static void backup_clean(BlockJob *job)
+{
+    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+    assert(s->target);
+    blk_unref(s->target);
+    s->target = NULL;
+}
+
 static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
 {
     BackupBlockJob *s = container_of(job, BackupBlockJob, common);
@@ -321,6 +329,7 @@ static const BlockJobDriver backup_job_driver = {
     .set_speed = backup_set_speed,
     .commit = backup_commit,
     .abort = backup_abort,
+    .clean = backup_clean,
     .attached_aio_context = backup_attached_aio_context,
     .drain = backup_drain,
 };
@@ -343,12 +352,8 @@ typedef struct {
 
 static void backup_complete(BlockJob *job, void *opaque)
 {
-    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
     BackupCompleteData *data = opaque;
 
-    blk_unref(s->target);
-    s->target = NULL;
-
     block_job_completed(job, data->ret);
     g_free(data);
 }
@@ -658,7 +663,7 @@ void backup_start(const char *job_id, BlockDriverState *bs,
         bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
     }
     if (job) {
-        blk_unref(job->target);
+        backup_clean(&job->common);
         block_job_unref(&job->common);
     }
 }