Diffstat (limited to 'block')
-rw-r--r--  block/amend.c                    |  28
-rw-r--r--  block/backup.c                   |   1
-rw-r--r--  block/block-backend.c            | 166
-rw-r--r--  block/commit.c                   |   4
-rw-r--r--  block/copy-before-write.c        |   2
-rw-r--r--  block/copy-before-write.h        |   7
-rw-r--r--  block/coroutines.h               |  81
-rw-r--r--  block/create.c                   |   2
-rw-r--r--  block/crypto.c                   |  68
-rw-r--r--  block/dirty-bitmap.c             |   5
-rw-r--r--  block/export/export.c            |   2
-rw-r--r--  block/export/fuse.c              |  25
-rw-r--r--  block/io.c                       |  75
-rw-r--r--  block/meson.build                |   7
-rw-r--r--  block/mirror.c                   |   4
-rw-r--r--  block/monitor/bitmap-qmp-cmds.c  |   6
-rw-r--r--  block/nbd.c                      |   1
-rw-r--r--  block/parallels.c                |   2
-rw-r--r--  block/snapshot.c                 |  28
-rw-r--r--  block/stream.c                   |   2
20 files changed, 445 insertions, 71 deletions
diff --git a/block/amend.c b/block/amend.c
index 392df9ef83..f696a006e3 100644
--- a/block/amend.c
+++ b/block/amend.c
@@ -53,10 +53,31 @@ static int coroutine_fn blockdev_amend_run(Job *job, Error **errp)
return ret;
}
+static int blockdev_amend_pre_run(BlockdevAmendJob *s, Error **errp)
+{
+ if (s->bs->drv->bdrv_amend_pre_run) {
+ return s->bs->drv->bdrv_amend_pre_run(s->bs, errp);
+ }
+
+ return 0;
+}
+
+static void blockdev_amend_free(Job *job)
+{
+ BlockdevAmendJob *s = container_of(job, BlockdevAmendJob, common);
+
+ if (s->bs->drv->bdrv_amend_clean) {
+ s->bs->drv->bdrv_amend_clean(s->bs);
+ }
+
+ bdrv_unref(s->bs);
+}
+
static const JobDriver blockdev_amend_job_driver = {
.instance_size = sizeof(BlockdevAmendJob),
.job_type = JOB_TYPE_AMEND,
.run = blockdev_amend_run,
+ .free = blockdev_amend_free,
};
void qmp_x_blockdev_amend(const char *job_id,
@@ -110,8 +131,15 @@ void qmp_x_blockdev_amend(const char *job_id,
return;
}
+ bdrv_ref(bs);
s->bs = bs,
s->opts = QAPI_CLONE(BlockdevAmendOptions, options),
s->force = has_force ? force : false;
+
+ if (blockdev_amend_pre_run(s, errp)) {
+ job_early_fail(&s->common);
+ return;
+ }
+
job_start(&s->common);
}
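
The two new optional hooks follow an acquire/release pattern: bdrv_amend_pre_run() is called in the caller's context before job_start(), and bdrv_amend_clean() is reached from the job's .free callback once the job is released. A minimal sketch of a driver wiring them up, using hypothetical mydrv_* names (the real user is the LUKS driver in block/crypto.c further down):

static int mydrv_amend_pre_run(BlockDriverState *bs, Error **errp)
{
    /* Acquire whatever the amend job will need (permissions, locks, ...). */
    return 0;
}

static void mydrv_amend_clean(BlockDriverState *bs)
{
    /* Undo mydrv_amend_pre_run(); reached via blockdev_amend_free(). */
}

static BlockDriver bdrv_mydrv = {
    .format_name        = "mydrv",
    .bdrv_amend_pre_run = mydrv_amend_pre_run,
    .bdrv_amend_clean   = mydrv_amend_clean,
};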
diff --git a/block/backup.c b/block/backup.c
index 21d5983779..5cfd0b999c 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -372,6 +372,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
assert(bs);
assert(target);
+ GLOBAL_STATE_CODE();
/* QMP interface protects us from these cases */
assert(sync_mode != MIRROR_SYNC_MODE_INCREMENTAL);
diff --git a/block/block-backend.c b/block/block-backend.c
index 4ff6b4d785..e0e1aff4b1 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -79,6 +79,7 @@ struct BlockBackend {
bool allow_aio_context_change;
bool allow_write_beyond_eof;
+ /* Protected by BQL */
NotifierList remove_bs_notifiers, insert_bs_notifiers;
QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;
@@ -111,12 +112,14 @@ static const AIOCBInfo block_backend_aiocb_info = {
static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);
-/* All BlockBackends */
+/* All BlockBackends. Protected by BQL. */
static QTAILQ_HEAD(, BlockBackend) block_backends =
QTAILQ_HEAD_INITIALIZER(block_backends);
-/* All BlockBackends referenced by the monitor and which are iterated through by
- * blk_next() */
+/*
+ * All BlockBackends referenced by the monitor and which are iterated through by
+ * blk_next(). Protected by BQL.
+ */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
QTAILQ_HEAD_INITIALIZER(monitor_block_backends);
@@ -236,6 +239,7 @@ static void blk_root_activate(BdrvChild *child, Error **errp)
void blk_set_force_allow_inactivate(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
blk->force_allow_inactivate = true;
}
@@ -354,6 +358,8 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
{
BlockBackend *blk;
+ GLOBAL_STATE_CODE();
+
blk = g_new0(BlockBackend, 1);
blk->refcnt = 1;
blk->ctx = ctx;
@@ -391,6 +397,8 @@ BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm,
{
BlockBackend *blk = blk_new(bdrv_get_aio_context(bs), perm, shared_perm);
+ GLOBAL_STATE_CODE();
+
if (blk_insert_bs(blk, bs, errp) < 0) {
blk_unref(blk);
return NULL;
@@ -419,6 +427,8 @@ BlockBackend *blk_new_open(const char *filename, const char *reference,
uint64_t perm = 0;
uint64_t shared = BLK_PERM_ALL;
+ GLOBAL_STATE_CODE();
+
/*
* blk_new_open() is mainly used in .bdrv_create implementations and the
* tools where sharing isn't a major concern because the BDS stays private
@@ -496,6 +506,7 @@ static void drive_info_del(DriveInfo *dinfo)
int blk_get_refcnt(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
return blk ? blk->refcnt : 0;
}
@@ -506,6 +517,7 @@ int blk_get_refcnt(BlockBackend *blk)
void blk_ref(BlockBackend *blk)
{
assert(blk->refcnt > 0);
+ GLOBAL_STATE_CODE();
blk->refcnt++;
}
@@ -516,6 +528,7 @@ void blk_ref(BlockBackend *blk)
*/
void blk_unref(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
if (blk) {
assert(blk->refcnt > 0);
if (blk->refcnt > 1) {
@@ -536,6 +549,7 @@ void blk_unref(BlockBackend *blk)
*/
BlockBackend *blk_all_next(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
return blk ? QTAILQ_NEXT(blk, link)
: QTAILQ_FIRST(&block_backends);
}
@@ -544,6 +558,8 @@ void blk_remove_all_bs(void)
{
BlockBackend *blk = NULL;
+ GLOBAL_STATE_CODE();
+
while ((blk = blk_all_next(blk)) != NULL) {
AioContext *ctx = blk_get_aio_context(blk);
@@ -567,6 +583,7 @@ void blk_remove_all_bs(void)
*/
BlockBackend *blk_next(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
return blk ? QTAILQ_NEXT(blk, monitor_link)
: QTAILQ_FIRST(&monitor_block_backends);
}
@@ -633,6 +650,7 @@ static void bdrv_next_reset(BdrvNextIterator *it)
BlockDriverState *bdrv_first(BdrvNextIterator *it)
{
+ GLOBAL_STATE_CODE();
bdrv_next_reset(it);
return bdrv_next(it);
}
@@ -670,6 +688,7 @@ bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
assert(!blk->name);
assert(name && name[0]);
+ GLOBAL_STATE_CODE();
if (!id_wellformed(name)) {
error_setg(errp, "Invalid device name");
@@ -697,6 +716,8 @@ bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
*/
void monitor_remove_blk(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
+
if (!blk->name) {
return;
}
@@ -712,6 +733,7 @@ void monitor_remove_blk(BlockBackend *blk)
*/
const char *blk_name(const BlockBackend *blk)
{
+ IO_CODE();
return blk->name ?: "";
}
@@ -723,6 +745,7 @@ BlockBackend *blk_by_name(const char *name)
{
BlockBackend *blk = NULL;
+ GLOBAL_STATE_CODE();
assert(name);
while ((blk = blk_next(blk)) != NULL) {
if (!strcmp(name, blk->name)) {
@@ -737,12 +760,16 @@ BlockBackend *blk_by_name(const char *name)
*/
BlockDriverState *blk_bs(BlockBackend *blk)
{
+ IO_CODE();
return blk->root ? blk->root->bs : NULL;
}
static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
BdrvChild *child;
+
+ GLOBAL_STATE_CODE();
+
QLIST_FOREACH(child, &bs->parents, next_parent) {
if (child->klass == &child_root) {
return child->opaque;
@@ -757,6 +784,7 @@ static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
*/
bool bdrv_has_blk(BlockDriverState *bs)
{
+ GLOBAL_STATE_CODE();
return bdrv_first_blk(bs) != NULL;
}
@@ -767,6 +795,7 @@ bool bdrv_is_root_node(BlockDriverState *bs)
{
BdrvChild *c;
+ GLOBAL_STATE_CODE();
QLIST_FOREACH(c, &bs->parents, next_parent) {
if (c->klass != &child_root) {
return false;
@@ -781,6 +810,7 @@ bool bdrv_is_root_node(BlockDriverState *bs)
*/
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
return blk->legacy_dinfo;
}
@@ -792,6 +822,7 @@ DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
assert(!blk->legacy_dinfo);
+ GLOBAL_STATE_CODE();
return blk->legacy_dinfo = dinfo;
}
@@ -802,6 +833,7 @@ DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
BlockBackend *blk = NULL;
+ GLOBAL_STATE_CODE();
while ((blk = blk_next(blk)) != NULL) {
if (blk->legacy_dinfo == dinfo) {
@@ -816,6 +848,7 @@ BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
*/
BlockBackendPublic *blk_get_public(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
return &blk->public;
}
@@ -824,6 +857,7 @@ BlockBackendPublic *blk_get_public(BlockBackend *blk)
*/
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
+ GLOBAL_STATE_CODE();
return container_of(public, BlockBackend, public);
}
@@ -835,6 +869,8 @@ void blk_remove_bs(BlockBackend *blk)
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
BdrvChild *root;
+ GLOBAL_STATE_CODE();
+
notifier_list_notify(&blk->remove_bs_notifiers, blk);
if (tgm->throttle_state) {
BlockDriverState *bs = blk_bs(blk);
@@ -869,6 +905,7 @@ void blk_remove_bs(BlockBackend *blk)
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
{
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
+ GLOBAL_STATE_CODE();
bdrv_ref(bs);
blk->root = bdrv_root_attach_child(bs, "root", &child_root,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
@@ -892,6 +929,7 @@ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
*/
int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp)
{
+ GLOBAL_STATE_CODE();
return bdrv_replace_child_bs(blk->root, new_bs, errp);
}
@@ -902,6 +940,7 @@ int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
Error **errp)
{
int ret;
+ GLOBAL_STATE_CODE();
if (blk->root && !blk->disable_perm) {
ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp);
@@ -918,6 +957,7 @@ int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
{
+ GLOBAL_STATE_CODE();
*perm = blk->perm;
*shared_perm = blk->shared_perm;
}
@@ -928,6 +968,7 @@ void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
*/
int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
{
+ GLOBAL_STATE_CODE();
if (blk->dev) {
return -EBUSY;
}
@@ -953,6 +994,7 @@ int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
void blk_detach_dev(BlockBackend *blk, DeviceState *dev)
{
assert(blk->dev == dev);
+ GLOBAL_STATE_CODE();
blk->dev = NULL;
blk->dev_ops = NULL;
blk->dev_opaque = NULL;
@@ -966,6 +1008,7 @@ void blk_detach_dev(BlockBackend *blk, DeviceState *dev)
*/
DeviceState *blk_get_attached_dev(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
return blk->dev;
}
@@ -974,6 +1017,7 @@ DeviceState *blk_get_attached_dev(BlockBackend *blk)
char *blk_get_attached_dev_id(BlockBackend *blk)
{
DeviceState *dev = blk->dev;
+ IO_CODE();
if (!dev) {
return g_strdup("");
@@ -994,6 +1038,8 @@ BlockBackend *blk_by_dev(void *dev)
{
BlockBackend *blk = NULL;
+ GLOBAL_STATE_CODE();
+
assert(dev != NULL);
while ((blk = blk_all_next(blk)) != NULL) {
if (blk->dev == dev) {
@@ -1011,6 +1057,7 @@ BlockBackend *blk_by_dev(void *dev)
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
void *opaque)
{
+ GLOBAL_STATE_CODE();
blk->dev_ops = ops;
blk->dev_opaque = opaque;
@@ -1032,6 +1079,7 @@ void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
*/
void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
{
+ GLOBAL_STATE_CODE();
if (blk->dev_ops && blk->dev_ops->change_media_cb) {
bool tray_was_open, tray_is_open;
Error *local_err = NULL;
@@ -1064,6 +1112,7 @@ static void blk_root_change_media(BdrvChild *child, bool load)
*/
bool blk_dev_has_removable_media(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}
@@ -1072,6 +1121,7 @@ bool blk_dev_has_removable_media(BlockBackend *blk)
*/
bool blk_dev_has_tray(BlockBackend *blk)
{
+ IO_CODE();
return blk->dev_ops && blk->dev_ops->is_tray_open;
}
@@ -1081,6 +1131,7 @@ bool blk_dev_has_tray(BlockBackend *blk)
*/
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
+ GLOBAL_STATE_CODE();
if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
}
@@ -1091,6 +1142,7 @@ void blk_dev_eject_request(BlockBackend *blk, bool force)
*/
bool blk_dev_is_tray_open(BlockBackend *blk)
{
+ IO_CODE();
if (blk_dev_has_tray(blk)) {
return blk->dev_ops->is_tray_open(blk->dev_opaque);
}
@@ -1103,6 +1155,7 @@ bool blk_dev_is_tray_open(BlockBackend *blk)
*/
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
return blk->dev_ops->is_medium_locked(blk->dev_opaque);
}
@@ -1123,6 +1176,7 @@ static void blk_root_resize(BdrvChild *child)
void blk_iostatus_enable(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
blk->iostatus_enabled = true;
blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
@@ -1131,6 +1185,7 @@ void blk_iostatus_enable(BlockBackend *blk)
* enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
+ IO_CODE();
return (blk->iostatus_enabled &&
(blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
blk->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
@@ -1139,16 +1194,19 @@ bool blk_iostatus_is_enabled(const BlockBackend *blk)
BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
return blk->iostatus;
}
void blk_iostatus_disable(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
blk->iostatus_enabled = false;
}
void blk_iostatus_reset(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
if (blk_iostatus_is_enabled(blk)) {
blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
@@ -1156,6 +1214,7 @@ void blk_iostatus_reset(BlockBackend *blk)
void blk_iostatus_set_err(BlockBackend *blk, int error)
{
+ IO_CODE();
assert(blk_iostatus_is_enabled(blk));
if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
@@ -1165,16 +1224,19 @@ void blk_iostatus_set_err(BlockBackend *blk, int error)
void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
+ IO_CODE();
blk->allow_write_beyond_eof = allow;
}
void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow)
{
+ IO_CODE();
blk->allow_aio_context_change = allow;
}
void blk_set_disable_request_queuing(BlockBackend *blk, bool disable)
{
+ IO_CODE();
blk->disable_request_queuing = disable;
}
@@ -1228,6 +1290,7 @@ blk_co_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes,
{
int ret;
BlockDriverState *bs;
+ IO_CODE();
blk_wait_while_drained(blk);
@@ -1258,6 +1321,7 @@ int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
BdrvRequestFlags flags)
{
int ret;
+ IO_OR_GS_CODE();
blk_inc_in_flight(blk);
ret = blk_co_do_preadv(blk, offset, bytes, qiov, flags);
@@ -1274,6 +1338,7 @@ blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
{
int ret;
BlockDriverState *bs;
+ IO_CODE();
blk_wait_while_drained(blk);
@@ -1309,6 +1374,7 @@ int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
BdrvRequestFlags flags)
{
int ret;
+ IO_OR_GS_CODE();
blk_inc_in_flight(blk);
ret = blk_co_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags);
@@ -1321,6 +1387,7 @@ int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
+ IO_OR_GS_CODE();
return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags);
}
@@ -1349,22 +1416,26 @@ typedef struct BlkRwCo {
int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int64_t bytes, BdrvRequestFlags flags)
{
+ IO_OR_GS_CODE();
return blk_pwritev_part(blk, offset, bytes, NULL, 0,
flags | BDRV_REQ_ZERO_WRITE);
}
int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
{
+ GLOBAL_STATE_CODE();
return bdrv_make_zero(blk->root, flags);
}
void blk_inc_in_flight(BlockBackend *blk)
{
+ IO_CODE();
qatomic_inc(&blk->in_flight);
}
void blk_dec_in_flight(BlockBackend *blk)
{
+ IO_CODE();
qatomic_dec(&blk->in_flight);
aio_wait_kick();
}
@@ -1383,6 +1454,7 @@ BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
void *opaque, int ret)
{
struct BlockBackendAIOCB *acb;
+ IO_CODE();
blk_inc_in_flight(blk);
acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
@@ -1490,6 +1562,7 @@ BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int64_t bytes, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
+ IO_CODE();
return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_write_entry,
flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}
@@ -1498,6 +1571,7 @@ int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int bytes)
{
int ret;
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
+ IO_OR_GS_CODE();
blk_inc_in_flight(blk);
ret = blk_do_preadv(blk, offset, bytes, &qiov, 0);
@@ -1511,6 +1585,7 @@ int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int bytes,
{
int ret;
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
+ IO_OR_GS_CODE();
ret = blk_pwritev_part(blk, offset, bytes, &qiov, 0, flags);
@@ -1519,6 +1594,7 @@ int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int bytes,
int64_t blk_getlength(BlockBackend *blk)
{
+ IO_CODE();
if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
@@ -1528,6 +1604,7 @@ int64_t blk_getlength(BlockBackend *blk)
void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
+ IO_CODE();
if (!blk_bs(blk)) {
*nb_sectors_ptr = 0;
} else {
@@ -1537,6 +1614,7 @@ void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
int64_t blk_nb_sectors(BlockBackend *blk)
{
+ IO_CODE();
if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
@@ -1548,6 +1626,7 @@ BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
+ IO_CODE();
assert((uint64_t)qiov->size <= INT64_MAX);
return blk_aio_prwv(blk, offset, qiov->size, qiov,
blk_aio_read_entry, flags, cb, opaque);
@@ -1557,6 +1636,7 @@ BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
+ IO_CODE();
assert((uint64_t)qiov->size <= INT64_MAX);
return blk_aio_prwv(blk, offset, qiov->size, qiov,
blk_aio_write_entry, flags, cb, opaque);
@@ -1564,11 +1644,13 @@ BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
void blk_aio_cancel(BlockAIOCB *acb)
{
+ GLOBAL_STATE_CODE();
bdrv_aio_cancel(acb);
}
void blk_aio_cancel_async(BlockAIOCB *acb)
{
+ IO_CODE();
bdrv_aio_cancel_async(acb);
}
@@ -1576,6 +1658,8 @@ void blk_aio_cancel_async(BlockAIOCB *acb)
int coroutine_fn
blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
+ IO_CODE();
+
blk_wait_while_drained(blk);
if (!blk_is_available(blk)) {
@@ -1588,6 +1672,7 @@ blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
int ret;
+ IO_OR_GS_CODE();
blk_inc_in_flight(blk);
ret = blk_do_ioctl(blk, req, buf);
@@ -1609,6 +1694,7 @@ static void blk_aio_ioctl_entry(void *opaque)
BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
BlockCompletionFunc *cb, void *opaque)
{
+ IO_CODE();
return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque);
}
@@ -1617,6 +1703,7 @@ int coroutine_fn
blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes)
{
int ret;
+ IO_CODE();
blk_wait_while_drained(blk);
@@ -1641,6 +1728,7 @@ BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
int64_t offset, int64_t bytes,
BlockCompletionFunc *cb, void *opaque)
{
+ IO_CODE();
return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
cb, opaque);
}
@@ -1649,6 +1737,7 @@ int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset,
int64_t bytes)
{
int ret;
+ IO_OR_GS_CODE();
blk_inc_in_flight(blk);
ret = blk_co_do_pdiscard(blk, offset, bytes);
@@ -1660,6 +1749,7 @@ int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset,
int blk_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes)
{
int ret;
+ IO_OR_GS_CODE();
blk_inc_in_flight(blk);
ret = blk_do_pdiscard(blk, offset, bytes);
@@ -1672,6 +1762,7 @@ int blk_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes)
int coroutine_fn blk_co_do_flush(BlockBackend *blk)
{
blk_wait_while_drained(blk);
+ IO_CODE();
if (!blk_is_available(blk)) {
return -ENOMEDIUM;
@@ -1692,12 +1783,14 @@ static void blk_aio_flush_entry(void *opaque)
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
BlockCompletionFunc *cb, void *opaque)
{
+ IO_CODE();
return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
}
int coroutine_fn blk_co_flush(BlockBackend *blk)
{
int ret;
+ IO_OR_GS_CODE();
blk_inc_in_flight(blk);
ret = blk_co_do_flush(blk);
@@ -1720,6 +1813,7 @@ int blk_flush(BlockBackend *blk)
void blk_drain(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
+ GLOBAL_STATE_CODE();
if (bs) {
bdrv_ref(bs);
@@ -1740,6 +1834,8 @@ void blk_drain_all(void)
{
BlockBackend *blk = NULL;
+ GLOBAL_STATE_CODE();
+
bdrv_drain_all_begin();
while ((blk = blk_all_next(blk)) != NULL) {
@@ -1759,12 +1855,14 @@ void blk_drain_all(void)
void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
BlockdevOnError on_write_error)
{
+ GLOBAL_STATE_CODE();
blk->on_read_error = on_read_error;
blk->on_write_error = on_write_error;
}
BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
+ IO_CODE();
return is_read ? blk->on_read_error : blk->on_write_error;
}
@@ -1772,6 +1870,7 @@ BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
int error)
{
BlockdevOnError on_err = blk_get_on_error(blk, is_read);
+ IO_CODE();
switch (on_err) {
case BLOCKDEV_ON_ERROR_ENOSPC:
@@ -1811,6 +1910,7 @@ void blk_error_action(BlockBackend *blk, BlockErrorAction action,
bool is_read, int error)
{
assert(error >= 0);
+ IO_CODE();
if (action == BLOCK_ERROR_ACTION_STOP) {
/* First set the iostatus, so that "info block" returns an iostatus
@@ -1842,6 +1942,7 @@ void blk_error_action(BlockBackend *blk, BlockErrorAction action,
bool blk_supports_write_perm(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
+ GLOBAL_STATE_CODE();
if (bs) {
return !bdrv_is_read_only(bs);
@@ -1856,12 +1957,14 @@ bool blk_supports_write_perm(BlockBackend *blk)
*/
bool blk_is_writable(BlockBackend *blk)
{
+ IO_CODE();
return blk->perm & BLK_PERM_WRITE;
}
bool blk_is_sg(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
+ GLOBAL_STATE_CODE();
if (!bs) {
return false;
@@ -1872,41 +1975,47 @@ bool blk_is_sg(BlockBackend *blk)
bool blk_enable_write_cache(BlockBackend *blk)
{
+ IO_CODE();
return blk->enable_write_cache;
}
void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
+ GLOBAL_STATE_CODE();
blk->enable_write_cache = wce;
}
-void blk_invalidate_cache(BlockBackend *blk, Error **errp)
+void blk_activate(BlockBackend *blk, Error **errp)
{
BlockDriverState *bs = blk_bs(blk);
+ GLOBAL_STATE_CODE();
if (!bs) {
error_setg(errp, "Device '%s' has no medium", blk->name);
return;
}
- bdrv_invalidate_cache(bs, errp);
+ bdrv_activate(bs, errp);
}
bool blk_is_inserted(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
+ IO_CODE();
return bs && bdrv_is_inserted(bs);
}
bool blk_is_available(BlockBackend *blk)
{
+ IO_CODE();
return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}
void blk_lock_medium(BlockBackend *blk, bool locked)
{
BlockDriverState *bs = blk_bs(blk);
+ IO_CODE();
if (bs) {
bdrv_lock_medium(bs, locked);
@@ -1917,6 +2026,7 @@ void blk_eject(BlockBackend *blk, bool eject_flag)
{
BlockDriverState *bs = blk_bs(blk);
char *id;
+ IO_CODE();
if (bs) {
bdrv_eject(bs, eject_flag);
@@ -1933,6 +2043,7 @@ void blk_eject(BlockBackend *blk, bool eject_flag)
int blk_get_flags(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
+ GLOBAL_STATE_CODE();
if (bs) {
return bdrv_get_flags(bs);
@@ -1945,6 +2056,7 @@ int blk_get_flags(BlockBackend *blk)
uint32_t blk_get_request_alignment(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
+ IO_CODE();
return bs ? bs->bl.request_alignment : BDRV_SECTOR_SIZE;
}
@@ -1953,6 +2065,7 @@ uint64_t blk_get_max_hw_transfer(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
uint64_t max = INT_MAX;
+ IO_CODE();
if (bs) {
max = MIN_NON_ZERO(max, bs->bl.max_hw_transfer);
@@ -1966,6 +2079,7 @@ uint32_t blk_get_max_transfer(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
uint32_t max = INT_MAX;
+ IO_CODE();
if (bs) {
max = MIN_NON_ZERO(max, bs->bl.max_transfer);
@@ -1975,33 +2089,39 @@ uint32_t blk_get_max_transfer(BlockBackend *blk)
int blk_get_max_hw_iov(BlockBackend *blk)
{
+ IO_CODE();
return MIN_NON_ZERO(blk->root->bs->bl.max_hw_iov,
blk->root->bs->bl.max_iov);
}
int blk_get_max_iov(BlockBackend *blk)
{
+ IO_CODE();
return blk->root->bs->bl.max_iov;
}
void blk_set_guest_block_size(BlockBackend *blk, int align)
{
+ IO_CODE();
blk->guest_block_size = align;
}
void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
+ IO_CODE();
return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}
void *blk_blockalign(BlockBackend *blk, size_t size)
{
+ IO_CODE();
return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}
bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
BlockDriverState *bs = blk_bs(blk);
+ GLOBAL_STATE_CODE();
if (!bs) {
return false;
@@ -2013,6 +2133,7 @@ bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
BlockDriverState *bs = blk_bs(blk);
+ GLOBAL_STATE_CODE();
if (bs) {
bdrv_op_unblock(bs, op, reason);
@@ -2022,6 +2143,7 @@ void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
void blk_op_block_all(BlockBackend *blk, Error *reason)
{
BlockDriverState *bs = blk_bs(blk);
+ GLOBAL_STATE_CODE();
if (bs) {
bdrv_op_block_all(bs, reason);
@@ -2031,6 +2153,7 @@ void blk_op_block_all(BlockBackend *blk, Error *reason)
void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
BlockDriverState *bs = blk_bs(blk);
+ GLOBAL_STATE_CODE();
if (bs) {
bdrv_op_unblock_all(bs, reason);
@@ -2040,6 +2163,7 @@ void blk_op_unblock_all(BlockBackend *blk, Error *reason)
AioContext *blk_get_aio_context(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
+ IO_CODE();
if (bs) {
AioContext *ctx = bdrv_get_aio_context(blk_bs(blk));
@@ -2090,6 +2214,7 @@ static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
Error **errp)
{
+ GLOBAL_STATE_CODE();
return blk_do_set_aio_context(blk, new_context, true, errp);
}
@@ -2126,6 +2251,7 @@ void blk_add_aio_context_notifier(BlockBackend *blk,
{
BlockBackendAioNotifier *notifier;
BlockDriverState *bs = blk_bs(blk);
+ GLOBAL_STATE_CODE();
notifier = g_new(BlockBackendAioNotifier, 1);
notifier->attached_aio_context = attached_aio_context;
@@ -2148,6 +2274,8 @@ void blk_remove_aio_context_notifier(BlockBackend *blk,
BlockBackendAioNotifier *notifier;
BlockDriverState *bs = blk_bs(blk);
+ GLOBAL_STATE_CODE();
+
if (bs) {
bdrv_remove_aio_context_notifier(bs, attached_aio_context,
detach_aio_context, opaque);
@@ -2168,17 +2296,20 @@ void blk_remove_aio_context_notifier(BlockBackend *blk,
void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
+ GLOBAL_STATE_CODE();
notifier_list_add(&blk->remove_bs_notifiers, notify);
}
void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
+ GLOBAL_STATE_CODE();
notifier_list_add(&blk->insert_bs_notifiers, notify);
}
void blk_io_plug(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
+ IO_CODE();
if (bs) {
bdrv_io_plug(bs);
@@ -2188,6 +2319,7 @@ void blk_io_plug(BlockBackend *blk)
void blk_io_unplug(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
+ IO_CODE();
if (bs) {
bdrv_io_unplug(bs);
@@ -2196,18 +2328,21 @@ void blk_io_unplug(BlockBackend *blk)
BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
+ IO_CODE();
return &blk->stats;
}
void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
BlockCompletionFunc *cb, void *opaque)
{
+ IO_CODE();
return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}
int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int64_t bytes, BdrvRequestFlags flags)
{
+ IO_OR_GS_CODE();
return blk_co_pwritev(blk, offset, bytes, NULL,
flags | BDRV_REQ_ZERO_WRITE);
}
@@ -2216,6 +2351,7 @@ int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
int64_t bytes)
{
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
+ IO_OR_GS_CODE();
return blk_pwritev_part(blk, offset, bytes, &qiov, 0,
BDRV_REQ_WRITE_COMPRESSED);
}
@@ -2223,6 +2359,7 @@ int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
int blk_truncate(BlockBackend *blk, int64_t offset, bool exact,
PreallocMode prealloc, BdrvRequestFlags flags, Error **errp)
{
+ IO_OR_GS_CODE();
if (!blk_is_available(blk)) {
error_setg(errp, "No medium inserted");
return -ENOMEDIUM;
@@ -2235,6 +2372,7 @@ int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
int64_t pos, int size)
{
int ret;
+ GLOBAL_STATE_CODE();
if (!blk_is_available(blk)) {
return -ENOMEDIUM;
@@ -2254,6 +2392,7 @@ int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
+ GLOBAL_STATE_CODE();
if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
@@ -2263,6 +2402,7 @@ int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
+ GLOBAL_STATE_CODE();
if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
@@ -2272,6 +2412,7 @@ int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
+ GLOBAL_STATE_CODE();
if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
@@ -2285,6 +2426,7 @@ int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
*/
void blk_update_root_state(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
assert(blk->root);
blk->root_state.open_flags = blk->root->bs->open_flags;
@@ -2297,6 +2439,7 @@ void blk_update_root_state(BlockBackend *blk)
*/
bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
return blk->root_state.detect_zeroes;
}
@@ -2306,17 +2449,20 @@ bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
*/
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
return blk->root_state.open_flags;
}
BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
return &blk->root_state;
}
int blk_commit_all(void)
{
BlockBackend *blk = NULL;
+ GLOBAL_STATE_CODE();
while ((blk = blk_all_next(blk)) != NULL) {
AioContext *aio_context = blk_get_aio_context(blk);
@@ -2341,6 +2487,7 @@ int blk_commit_all(void)
/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
+ GLOBAL_STATE_CODE();
throttle_group_config(&blk->public.throttle_group_member, cfg);
}
@@ -2349,6 +2496,7 @@ void blk_io_limits_disable(BlockBackend *blk)
BlockDriverState *bs = blk_bs(blk);
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
assert(tgm->throttle_state);
+ GLOBAL_STATE_CODE();
if (bs) {
bdrv_ref(bs);
bdrv_drained_begin(bs);
@@ -2364,12 +2512,14 @@ void blk_io_limits_disable(BlockBackend *blk)
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
assert(!blk->public.throttle_group_member.throttle_state);
+ GLOBAL_STATE_CODE();
throttle_group_register_tgm(&blk->public.throttle_group_member,
group, blk_get_aio_context(blk));
}
void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
+ GLOBAL_STATE_CODE();
/* this BB is not part of any group */
if (!blk->public.throttle_group_member.throttle_state) {
return;
@@ -2437,11 +2587,13 @@ static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
void blk_register_buf(BlockBackend *blk, void *host, size_t size)
{
+ GLOBAL_STATE_CODE();
bdrv_register_buf(blk_bs(blk), host, size);
}
void blk_unregister_buf(BlockBackend *blk, void *host)
{
+ GLOBAL_STATE_CODE();
bdrv_unregister_buf(blk_bs(blk), host);
}
@@ -2451,6 +2603,8 @@ int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
BdrvRequestFlags write_flags)
{
int r;
+ IO_CODE();
+
r = blk_check_byte_request(blk_in, off_in, bytes);
if (r) {
return r;
@@ -2466,11 +2620,13 @@ int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
const BdrvChild *blk_root(BlockBackend *blk)
{
+ GLOBAL_STATE_CODE();
return blk->root;
}
int blk_make_empty(BlockBackend *blk, Error **errp)
{
+ GLOBAL_STATE_CODE();
if (!blk_is_available(blk)) {
error_setg(errp, "No medium inserted");
return -ENOMEDIUM;
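
Most of the block-backend.c hunks only add the GLOBAL_STATE_CODE(), IO_CODE() and IO_OR_GS_CODE() markers that classify each function as BQL-only, thread-safe I/O, or callable from either the BQL thread or the node's home iothread. As a rough, hedged sketch of what such markers can amount to (the real definitions live in the QEMU headers and may differ):

/* Sketch only -- not the definitions used by this series. */
#define GLOBAL_STATE_CODE() \
    do { assert(qemu_in_main_thread()); } while (0)  /* main/BQL thread only */

#define IO_CODE() \
    do { } while (0)  /* thread-safe I/O path; marker is documentation */

#define IO_OR_GS_CODE() \
    do { } while (0)  /* BQL thread or the BDS's home iothread */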
diff --git a/block/commit.c b/block/commit.c
index b1fc7b908b..c76899f640 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -253,6 +253,8 @@ void commit_start(const char *job_id, BlockDriverState *bs,
uint64_t base_perms, iter_shared_perms;
int ret;
+ GLOBAL_STATE_CODE();
+
assert(top != bs);
if (bdrv_skip_filters(top) == bdrv_skip_filters(base)) {
error_setg(errp, "Invalid files for merge: top and base are the same");
@@ -432,6 +434,8 @@ int bdrv_commit(BlockDriverState *bs)
QEMU_AUTO_VFREE uint8_t *buf = NULL;
Error *local_err = NULL;
+ GLOBAL_STATE_CODE();
+
if (!drv)
return -ENOMEDIUM;
diff --git a/block/copy-before-write.c b/block/copy-before-write.c
index c30a5ff8de..80b7684dba 100644
--- a/block/copy-before-write.c
+++ b/block/copy-before-write.c
@@ -223,6 +223,7 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
QDict *opts;
assert(source->total_sectors == target->total_sectors);
+ GLOBAL_STATE_CODE();
opts = qdict_new();
qdict_put_str(opts, "driver", "copy-before-write");
@@ -245,6 +246,7 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
void bdrv_cbw_drop(BlockDriverState *bs)
{
+ GLOBAL_STATE_CODE();
bdrv_drop_filter(bs, &error_abort);
bdrv_unref(bs);
}
diff --git a/block/copy-before-write.h b/block/copy-before-write.h
index 51847e711a..6e72bb25e9 100644
--- a/block/copy-before-write.h
+++ b/block/copy-before-write.h
@@ -29,6 +29,13 @@
#include "block/block_int.h"
#include "block/block-copy.h"
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
BlockDriverState *target,
const char *filter_node_name,
diff --git a/block/coroutines.h b/block/coroutines.h
index c8c14a29c8..b293e943c8 100644
--- a/block/coroutines.h
+++ b/block/coroutines.h
@@ -30,17 +30,17 @@
/* For blk_bs() in generated block/block-gen.c */
#include "sysemu/block-backend.h"
+/*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
+ */
+
int coroutine_fn bdrv_co_check(BlockDriverState *bs,
BdrvCheckResult *res, BdrvCheckMode fix);
int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp);
-int generated_co_wrapper
-bdrv_preadv(BdrvChild *child, int64_t offset, unsigned int bytes,
- QEMUIOVector *qiov, BdrvRequestFlags flags);
-int generated_co_wrapper
-bdrv_pwritev(BdrvChild *child, int64_t offset, unsigned int bytes,
- QEMUIOVector *qiov, BdrvRequestFlags flags);
-
int coroutine_fn
bdrv_co_common_block_status_above(BlockDriverState *bs,
BlockDriverState *base,
@@ -52,6 +52,51 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
int64_t *map,
BlockDriverState **file,
int *depth);
+
+int coroutine_fn bdrv_co_readv_vmstate(BlockDriverState *bs,
+ QEMUIOVector *qiov, int64_t pos);
+int coroutine_fn bdrv_co_writev_vmstate(BlockDriverState *bs,
+ QEMUIOVector *qiov, int64_t pos);
+
+int coroutine_fn
+nbd_co_do_establish_connection(BlockDriverState *bs, Error **errp);
+
+
+int coroutine_fn
+blk_co_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, BdrvRequestFlags flags);
+
+
+int coroutine_fn
+blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset,
+ BdrvRequestFlags flags);
+
+int coroutine_fn
+blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
+
+int coroutine_fn
+blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes);
+
+int coroutine_fn blk_co_do_flush(BlockBackend *blk);
+
+
+/*
+ * "I/O or GS" API functions. These functions can run without
+ * the BQL, but only in one specific iothread/main loop.
+ *
+ * See include/block/block-io.h for more information about
+ * the "I/O or GS" API.
+ */
+
+int generated_co_wrapper
+bdrv_preadv(BdrvChild *child, int64_t offset, unsigned int bytes,
+ QEMUIOVector *qiov, BdrvRequestFlags flags);
+
+int generated_co_wrapper
+bdrv_pwritev(BdrvChild *child, int64_t offset, unsigned int bytes,
+ QEMUIOVector *qiov, BdrvRequestFlags flags);
+
int generated_co_wrapper
bdrv_common_block_status_above(BlockDriverState *bs,
BlockDriverState *base,
@@ -63,46 +108,24 @@ bdrv_common_block_status_above(BlockDriverState *bs,
int64_t *map,
BlockDriverState **file,
int *depth);
-
-int coroutine_fn bdrv_co_readv_vmstate(BlockDriverState *bs,
- QEMUIOVector *qiov, int64_t pos);
-int coroutine_fn bdrv_co_writev_vmstate(BlockDriverState *bs,
- QEMUIOVector *qiov, int64_t pos);
-
int generated_co_wrapper
nbd_do_establish_connection(BlockDriverState *bs, Error **errp);
-int coroutine_fn
-nbd_co_do_establish_connection(BlockDriverState *bs, Error **errp);
-
int generated_co_wrapper
blk_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags);
-int coroutine_fn
-blk_co_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes,
- QEMUIOVector *qiov, BdrvRequestFlags flags);
-
int generated_co_wrapper
blk_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags);
-int coroutine_fn
-blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
- QEMUIOVector *qiov, size_t qiov_offset,
- BdrvRequestFlags flags);
int generated_co_wrapper
blk_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
-int coroutine_fn
-blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
int generated_co_wrapper
blk_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes);
-int coroutine_fn
-blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes);
int generated_co_wrapper blk_do_flush(BlockBackend *blk);
-int coroutine_fn blk_co_do_flush(BlockBackend *blk);
#endif /* BLOCK_COROUTINES_INT_H */
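
After the reshuffle the header is grouped by API class: the coroutine_fn declarations form the thread-safe I/O API, while the generated_co_wrapper declarations are their "I/O or GS" counterparts, which create a coroutine and poll for completion when called outside one. A hedged usage sketch with hypothetical caller names:

/* Illustration only; the declarations come from block/coroutines.h. */
static void coroutine_fn flush_from_iothread(BlockBackend *blk)
{
    int ret = blk_co_do_flush(blk);   /* I/O API: plain coroutine call */
    if (ret < 0) {
        /* report or propagate the error */
    }
}

static void flush_from_main_loop(BlockBackend *blk)
{
    int ret = blk_do_flush(blk);      /* generated wrapper: polls until done */
    if (ret < 0) {
        /* report or propagate the error */
    }
}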
diff --git a/block/create.c b/block/create.c
index 89812669df..4df43f11f4 100644
--- a/block/create.c
+++ b/block/create.c
@@ -42,6 +42,8 @@ static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
int ret;
+ GLOBAL_STATE_CODE();
+
job_progress_set_remaining(&s->common, 1);
ret = s->drv->bdrv_co_create(s->opts, errp);
job_progress_update(&s->common, 1);
diff --git a/block/crypto.c b/block/crypto.c
index c8ba4681e2..9d5fecbef8 100644
--- a/block/crypto.c
+++ b/block/crypto.c
@@ -778,36 +778,54 @@ block_crypto_get_specific_info_luks(BlockDriverState *bs, Error **errp)
}
static int
+block_crypto_amend_prepare(BlockDriverState *bs, Error **errp)
+{
+ BlockCrypto *crypto = bs->opaque;
+ int ret;
+
+ /* apply for exclusive read/write permissions to the underlying file */
+ crypto->updating_keys = true;
+ ret = bdrv_child_refresh_perms(bs, bs->file, errp);
+ if (ret < 0) {
+ /* Well, in this case we will not be updating any keys */
+ crypto->updating_keys = false;
+ }
+ return ret;
+}
+
+static void
+block_crypto_amend_cleanup(BlockDriverState *bs)
+{
+ BlockCrypto *crypto = bs->opaque;
+ Error *errp = NULL;
+
+ /* release exclusive read/write permissions to the underlying file */
+ crypto->updating_keys = false;
+ bdrv_child_refresh_perms(bs, bs->file, &errp);
+
+ if (errp) {
+ error_report_err(errp);
+ }
+}
+
+static int
block_crypto_amend_options_generic_luks(BlockDriverState *bs,
QCryptoBlockAmendOptions *amend_options,
bool force,
Error **errp)
{
BlockCrypto *crypto = bs->opaque;
- int ret;
assert(crypto);
assert(crypto->block);
- /* apply for exclusive read/write permissions to the underlying file*/
- crypto->updating_keys = true;
- ret = bdrv_child_refresh_perms(bs, bs->file, errp);
- if (ret) {
- goto cleanup;
- }
-
- ret = qcrypto_block_amend_options(crypto->block,
- block_crypto_read_func,
- block_crypto_write_func,
- bs,
- amend_options,
- force,
- errp);
-cleanup:
- /* release exclusive read/write permissions to the underlying file*/
- crypto->updating_keys = false;
- bdrv_child_refresh_perms(bs, bs->file, errp);
- return ret;
+ return qcrypto_block_amend_options(crypto->block,
+ block_crypto_read_func,
+ block_crypto_write_func,
+ bs,
+ amend_options,
+ force,
+ errp);
}
static int
@@ -833,8 +851,16 @@ block_crypto_amend_options_luks(BlockDriverState *bs,
if (!amend_options) {
goto cleanup;
}
+
+ ret = block_crypto_amend_prepare(bs, errp);
+ if (ret) {
+ goto perm_cleanup;
+ }
ret = block_crypto_amend_options_generic_luks(bs, amend_options,
force, errp);
+
+perm_cleanup:
+ block_crypto_amend_cleanup(bs);
cleanup:
qapi_free_QCryptoBlockAmendOptions(amend_options);
return ret;
@@ -931,6 +957,8 @@ static BlockDriver bdrv_crypto_luks = {
.bdrv_get_specific_info = block_crypto_get_specific_info_luks,
.bdrv_amend_options = block_crypto_amend_options_luks,
.bdrv_co_amend = block_crypto_co_amend_luks,
+ .bdrv_amend_pre_run = block_crypto_amend_prepare,
+ .bdrv_amend_clean = block_crypto_amend_cleanup,
.is_format = true,
diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c
index 0ef46163e3..0334b85805 100644
--- a/block/dirty-bitmap.c
+++ b/block/dirty-bitmap.c
@@ -496,6 +496,7 @@ static void coroutine_fn bdrv_co_can_store_new_dirty_bitmap_entry(void *opaque)
bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
uint32_t granularity, Error **errp)
{
+ IO_CODE();
if (qemu_in_coroutine()) {
return bdrv_co_can_store_new_dirty_bitmap(bs, name, granularity, errp);
} else {
@@ -656,6 +657,7 @@ void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap,
void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out)
{
+ IO_CODE();
assert(!bdrv_dirty_bitmap_readonly(bitmap));
bdrv_dirty_bitmaps_lock(bitmap->bs);
if (!out) {
@@ -673,6 +675,7 @@ void bdrv_restore_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *backup)
{
HBitmap *tmp = bitmap->bitmap;
assert(!bdrv_dirty_bitmap_readonly(bitmap));
+ GLOBAL_STATE_CODE();
bitmap->bitmap = backup;
hbitmap_free(tmp);
}
@@ -737,6 +740,7 @@ void bdrv_dirty_bitmap_deserialize_finish(BdrvDirtyBitmap *bitmap)
void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
BdrvDirtyBitmap *bitmap;
+ IO_CODE();
if (QLIST_EMPTY(&bs->dirty_bitmaps)) {
return;
@@ -928,6 +932,7 @@ bool bdrv_dirty_bitmap_merge_internal(BdrvDirtyBitmap *dest,
bool lock)
{
bool ret;
+ IO_CODE();
assert(!bdrv_dirty_bitmap_readonly(dest));
assert(!bdrv_dirty_bitmap_inconsistent(dest));
diff --git a/block/export/export.c b/block/export/export.c
index 6d3b9964c8..7253af3bc3 100644
--- a/block/export/export.c
+++ b/block/export/export.c
@@ -139,7 +139,7 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
* access since the export could be available before migration handover.
* ctx was acquired in the caller.
*/
- bdrv_invalidate_cache(bs, NULL);
+ bdrv_activate(bs, NULL);
perm = BLK_PERM_CONSISTENT_READ;
if (export->writable) {
diff --git a/block/export/fuse.c b/block/export/fuse.c
index fdda8e3c81..5029e70f84 100644
--- a/block/export/fuse.c
+++ b/block/export/fuse.c
@@ -86,8 +86,8 @@ static int fuse_export_create(BlockExport *blk_exp,
assert(blk_exp_args->type == BLOCK_EXPORT_TYPE_FUSE);
- /* For growable exports, take the RESIZE permission */
- if (args->growable) {
+ /* For growable and writable exports, take the RESIZE permission */
+ if (args->growable || blk_exp_args->writable) {
uint64_t blk_perm, blk_shared_perm;
blk_get_perm(exp->common.blk, &blk_perm, &blk_shared_perm);
@@ -392,14 +392,23 @@ static int fuse_do_truncate(const FuseExport *exp, int64_t size,
{
uint64_t blk_perm, blk_shared_perm;
BdrvRequestFlags truncate_flags = 0;
- int ret;
+ bool add_resize_perm;
+ int ret, ret_check;
+
+ /* Growable and writable exports have a permanent RESIZE permission */
+ add_resize_perm = !exp->growable && !exp->writable;
if (req_zero_write) {
truncate_flags |= BDRV_REQ_ZERO_WRITE;
}
- /* Growable exports have a permanent RESIZE permission */
- if (!exp->growable) {
+ if (add_resize_perm) {
+
+ if (!qemu_in_main_thread()) {
+ /* Changing permissions like below only works in the main thread */
+ return -EPERM;
+ }
+
blk_get_perm(exp->common.blk, &blk_perm, &blk_shared_perm);
ret = blk_set_perm(exp->common.blk, blk_perm | BLK_PERM_RESIZE,
@@ -412,9 +421,11 @@ static int fuse_do_truncate(const FuseExport *exp, int64_t size,
ret = blk_truncate(exp->common.blk, size, true, prealloc,
truncate_flags, NULL);
- if (!exp->growable) {
+ if (add_resize_perm) {
/* Must succeed, because we are only giving up the RESIZE permission */
- blk_set_perm(exp->common.blk, blk_perm, blk_shared_perm, &error_abort);
+ ret_check = blk_set_perm(exp->common.blk, blk_perm,
+ blk_shared_perm, &error_abort);
+ assert(ret_check == 0);
}
return ret;
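
Because the permission-taking and permission-dropping branches sit in separate hunks, here is a condensed sketch of the resulting fuse_do_truncate() flow (identifiers as in the hunks above, error handling trimmed; not a verbatim copy of the function):

bool add_resize_perm = !exp->growable && !exp->writable;

if (add_resize_perm) {
    if (!qemu_in_main_thread()) {
        return -EPERM;                 /* changing permissions is GS-only */
    }
    blk_get_perm(exp->common.blk, &blk_perm, &blk_shared_perm);
    ret = blk_set_perm(exp->common.blk, blk_perm | BLK_PERM_RESIZE,
                       blk_shared_perm, NULL);
    if (ret < 0) {
        return ret;
    }
}

ret = blk_truncate(exp->common.blk, size, true, prealloc, truncate_flags, NULL);

if (add_resize_perm) {
    /* Must succeed: we are only giving the RESIZE permission back up. */
    blk_set_perm(exp->common.blk, blk_perm, blk_shared_perm, &error_abort);
}
return ret;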
diff --git a/block/io.c b/block/io.c
index 4e4cb556c5..efc011ce65 100644
--- a/block/io.c
+++ b/block/io.c
@@ -70,6 +70,7 @@ static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
void bdrv_parent_drained_end_single(BdrvChild *c)
{
int drained_end_counter = 0;
+ IO_OR_GS_CODE();
bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);
}
@@ -114,6 +115,7 @@ static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
+ IO_OR_GS_CODE();
c->parent_quiesce_counter++;
if (c->klass->drained_begin) {
c->klass->drained_begin(c);
@@ -164,6 +166,8 @@ void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
BdrvChild *c;
bool have_limits;
+ GLOBAL_STATE_CODE();
+
if (tran) {
BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
*s = (BdrvRefreshLimitsState) {
@@ -189,10 +193,6 @@ void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
QLIST_FOREACH(c, &bs->children, next) {
if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
{
- bdrv_refresh_limits(c->bs, tran, errp);
- if (*errp) {
- return;
- }
bdrv_merge_limits(&bs->bl, &c->bs->bl);
have_limits = true;
}
@@ -226,12 +226,14 @@ void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
*/
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
+ IO_CODE();
qatomic_inc(&bs->copy_on_read);
}
void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
int old = qatomic_fetch_dec(&bs->copy_on_read);
+ IO_CODE();
assert(old >= 1);
}
@@ -303,6 +305,7 @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
BdrvChild *ignore_parent, bool ignore_bds_parents)
{
BdrvChild *child, *next;
+ IO_OR_GS_CODE();
if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
return true;
@@ -426,6 +429,7 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
BdrvChild *parent, bool ignore_bds_parents)
{
+ IO_OR_GS_CODE();
assert(!qemu_in_coroutine());
/* Stop things in parent-to-child order */
@@ -477,11 +481,13 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
void bdrv_drained_begin(BlockDriverState *bs)
{
+ IO_OR_GS_CODE();
bdrv_do_drained_begin(bs, false, NULL, false, true);
}
void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
+ IO_OR_GS_CODE();
bdrv_do_drained_begin(bs, true, NULL, false, true);
}
@@ -538,18 +544,21 @@ static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
void bdrv_drained_end(BlockDriverState *bs)
{
int drained_end_counter = 0;
+ IO_OR_GS_CODE();
bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}
void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
+ IO_CODE();
bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}
void bdrv_subtree_drained_end(BlockDriverState *bs)
{
int drained_end_counter = 0;
+ IO_OR_GS_CODE();
bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}
@@ -557,6 +566,7 @@ void bdrv_subtree_drained_end(BlockDriverState *bs)
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
int i;
+ IO_OR_GS_CODE();
for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
bdrv_do_drained_begin(child->bs, true, child, false, true);
@@ -567,6 +577,7 @@ void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
int drained_end_counter = 0;
int i;
+ IO_OR_GS_CODE();
for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
bdrv_do_drained_end(child->bs, true, child, false,
@@ -585,6 +596,7 @@ void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
*/
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
+ IO_OR_GS_CODE();
assert(qemu_in_coroutine());
bdrv_drained_begin(bs);
bdrv_drained_end(bs);
@@ -592,6 +604,7 @@ void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
void bdrv_drain(BlockDriverState *bs)
{
+ IO_OR_GS_CODE();
bdrv_drained_begin(bs);
bdrv_drained_end(bs);
}
@@ -612,6 +625,7 @@ static bool bdrv_drain_all_poll(void)
{
BlockDriverState *bs = NULL;
bool result = false;
+ GLOBAL_STATE_CODE();
/* bdrv_drain_poll() can't make changes to the graph and we are holding the
* main AioContext lock, so iterating bdrv_next_all_states() is safe. */
@@ -640,6 +654,7 @@ static bool bdrv_drain_all_poll(void)
void bdrv_drain_all_begin(void)
{
BlockDriverState *bs = NULL;
+ GLOBAL_STATE_CODE();
if (qemu_in_coroutine()) {
bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
@@ -682,6 +697,7 @@ void bdrv_drain_all_begin(void)
void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
int drained_end_counter = 0;
+ GLOBAL_STATE_CODE();
g_assert(bs->quiesce_counter > 0);
g_assert(!bs->refcnt);
@@ -696,6 +712,7 @@ void bdrv_drain_all_end(void)
{
BlockDriverState *bs = NULL;
int drained_end_counter = 0;
+ GLOBAL_STATE_CODE();
/*
* bdrv queue is managed by record/replay,
@@ -723,6 +740,7 @@ void bdrv_drain_all_end(void)
void bdrv_drain_all(void)
{
+ GLOBAL_STATE_CODE();
bdrv_drain_all_begin();
bdrv_drain_all_end();
}
@@ -867,6 +885,7 @@ BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
BdrvTrackedRequest *req;
Coroutine *self = qemu_coroutine_self();
+ IO_CODE();
QLIST_FOREACH(req, &bs->tracked_requests, list) {
if (req->co == self) {
@@ -886,7 +905,7 @@ void bdrv_round_to_clusters(BlockDriverState *bs,
int64_t *cluster_bytes)
{
BlockDriverInfo bdi;
-
+ IO_CODE();
if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
*cluster_offset = offset;
*cluster_bytes = bytes;
@@ -912,16 +931,19 @@ static int bdrv_get_cluster_size(BlockDriverState *bs)
void bdrv_inc_in_flight(BlockDriverState *bs)
{
+ IO_CODE();
qatomic_inc(&bs->in_flight);
}
void bdrv_wakeup(BlockDriverState *bs)
{
+ IO_CODE();
aio_wait_kick();
}
void bdrv_dec_in_flight(BlockDriverState *bs)
{
+ IO_CODE();
qatomic_dec(&bs->in_flight);
bdrv_wakeup(bs);
}
@@ -946,6 +968,7 @@ bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
uint64_t align)
{
bool waited;
+ IO_CODE();
qemu_co_mutex_lock(&req->bs->reqs_lock);
@@ -1040,6 +1063,7 @@ static int bdrv_check_request32(int64_t offset, int64_t bytes,
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
int64_t bytes, BdrvRequestFlags flags)
{
+ IO_CODE();
return bdrv_pwritev(child, offset, bytes, NULL,
BDRV_REQ_ZERO_WRITE | flags);
}
@@ -1058,6 +1082,7 @@ int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
int ret;
int64_t target_size, bytes, offset = 0;
BlockDriverState *bs = child->bs;
+ IO_CODE();
target_size = bdrv_getlength(bs);
if (target_size < 0) {
@@ -1090,6 +1115,7 @@ int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int64_t bytes)
{
int ret;
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
+ IO_CODE();
if (bytes < 0) {
return -EINVAL;
@@ -1111,6 +1137,7 @@ int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf,
{
int ret;
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
+ IO_CODE();
if (bytes < 0) {
return -EINVAL;
@@ -1131,6 +1158,7 @@ int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
const void *buf, int64_t count)
{
int ret;
+ IO_CODE();
ret = bdrv_pwrite(child, offset, buf, count);
if (ret < 0) {
@@ -1797,6 +1825,7 @@ int coroutine_fn bdrv_co_preadv(BdrvChild *child,
int64_t offset, int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
+ IO_CODE();
return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}
@@ -1809,6 +1838,7 @@ int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
BdrvTrackedRequest req;
BdrvRequestPadding pad;
int ret;
+ IO_CODE();
trace_bdrv_co_preadv_part(bs, offset, bytes, flags);
@@ -2230,6 +2260,7 @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
int64_t offset, int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
+ IO_CODE();
return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
}
@@ -2243,6 +2274,7 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
BdrvRequestPadding pad;
int ret;
bool padded = false;
+ IO_CODE();
trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
@@ -2326,6 +2358,7 @@ out:
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
int64_t bytes, BdrvRequestFlags flags)
{
+ IO_CODE();
trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
@@ -2345,6 +2378,8 @@ int bdrv_flush_all(void)
BlockDriverState *bs = NULL;
int result = 0;
+ GLOBAL_STATE_CODE();
+
/*
* bdrv queue is managed by record/replay,
* creating new flush request for stopping
@@ -2639,6 +2674,7 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
BlockDriverState *p;
int64_t eof = 0;
int dummy;
+ IO_CODE();
assert(!include_base || base); /* Can't include NULL base */
@@ -2728,6 +2764,7 @@ int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
int64_t offset, int64_t bytes, int64_t *pnum,
int64_t *map, BlockDriverState **file)
{
+ IO_CODE();
return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
pnum, map, file, NULL);
}
@@ -2735,6 +2772,7 @@ int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map, BlockDriverState **file)
{
+ IO_CODE();
return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
offset, bytes, pnum, map, file);
}
@@ -2751,6 +2789,7 @@ int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
{
int ret;
int64_t pnum = bytes;
+ IO_CODE();
if (!bytes) {
return 1;
@@ -2771,6 +2810,7 @@ int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
{
int ret;
int64_t dummy;
+ IO_CODE();
ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
bytes, pnum ? pnum : &dummy, NULL,
@@ -2807,6 +2847,7 @@ int bdrv_is_allocated_above(BlockDriverState *top,
int ret = bdrv_common_block_status_above(top, base, include_base, false,
offset, bytes, pnum, NULL, NULL,
&depth);
+ IO_CODE();
if (ret < 0) {
return ret;
}
@@ -2823,6 +2864,7 @@ bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
BlockDriver *drv = bs->drv;
BlockDriverState *child_bs = bdrv_primary_bs(bs);
int ret;
+ IO_CODE();
ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
if (ret < 0) {
@@ -2854,6 +2896,7 @@ bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
BlockDriver *drv = bs->drv;
BlockDriverState *child_bs = bdrv_primary_bs(bs);
int ret;
+ IO_CODE();
ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
if (ret < 0) {
@@ -2884,6 +2927,7 @@ int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
{
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
int ret = bdrv_writev_vmstate(bs, &qiov, pos);
+ IO_CODE();
return ret < 0 ? ret : size;
}
@@ -2893,6 +2937,7 @@ int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
{
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
int ret = bdrv_readv_vmstate(bs, &qiov, pos);
+ IO_CODE();
return ret < 0 ? ret : size;
}
@@ -2902,6 +2947,7 @@ int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
void bdrv_aio_cancel(BlockAIOCB *acb)
{
+ IO_CODE();
qemu_aio_ref(acb);
bdrv_aio_cancel_async(acb);
while (acb->refcnt > 1) {
@@ -2926,6 +2972,7 @@ void bdrv_aio_cancel(BlockAIOCB *acb)
* In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
+ IO_CODE();
if (acb->aiocb_info->cancel_async) {
acb->aiocb_info->cancel_async(acb);
}
@@ -2940,6 +2987,7 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
BdrvChild *child;
int current_gen;
int ret = 0;
+ IO_CODE();
bdrv_inc_in_flight(bs);
@@ -3065,6 +3113,7 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
int64_t max_pdiscard;
int head, tail, align;
BlockDriverState *bs = child->bs;
+ IO_CODE();
if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
return -ENOMEDIUM;
@@ -3183,6 +3232,7 @@ int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
.coroutine = qemu_coroutine_self(),
};
BlockAIOCB *acb;
+ IO_CODE();
bdrv_inc_in_flight(bs);
if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
@@ -3207,17 +3257,20 @@ out:
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
+ IO_CODE();
return qemu_memalign(bdrv_opt_mem_align(bs), size);
}
void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
+ IO_CODE();
return memset(qemu_blockalign(bs, size), 0, size);
}
void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
size_t align = bdrv_opt_mem_align(bs);
+ IO_CODE();
/* Ensure that NULL is never returned on success */
assert(align > 0);
@@ -3231,6 +3284,7 @@ void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
void *mem = qemu_try_blockalign(bs, size);
+ IO_CODE();
if (mem) {
memset(mem, 0, size);
@@ -3246,6 +3300,7 @@ bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
int i;
size_t alignment = bdrv_min_mem_align(bs);
+ IO_CODE();
for (i = 0; i < qiov->niov; i++) {
if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
@@ -3262,6 +3317,7 @@ bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
void bdrv_io_plug(BlockDriverState *bs)
{
BdrvChild *child;
+ IO_CODE();
QLIST_FOREACH(child, &bs->children, next) {
bdrv_io_plug(child->bs);
@@ -3278,6 +3334,7 @@ void bdrv_io_plug(BlockDriverState *bs)
void bdrv_io_unplug(BlockDriverState *bs)
{
BdrvChild *child;
+ IO_CODE();
assert(bs->io_plugged);
if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
@@ -3296,6 +3353,7 @@ void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
{
BdrvChild *child;
+ GLOBAL_STATE_CODE();
if (bs->drv && bs->drv->bdrv_register_buf) {
bs->drv->bdrv_register_buf(bs, host, size);
}
@@ -3308,6 +3366,7 @@ void bdrv_unregister_buf(BlockDriverState *bs, void *host)
{
BdrvChild *child;
+ GLOBAL_STATE_CODE();
if (bs->drv && bs->drv->bdrv_unregister_buf) {
bs->drv->bdrv_unregister_buf(bs, host);
}
@@ -3402,6 +3461,7 @@ int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
+ IO_CODE();
trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
read_flags, write_flags);
return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
@@ -3418,6 +3478,7 @@ int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
+ IO_CODE();
trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
read_flags, write_flags);
return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
@@ -3429,6 +3490,7 @@ int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
int64_t bytes, BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
+ IO_CODE();
return bdrv_co_copy_range_from(src, src_offset,
dst, dst_offset,
bytes, read_flags, write_flags);
@@ -3461,7 +3523,7 @@ int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
BdrvTrackedRequest req;
int64_t old_size, new_bytes;
int ret;
-
+ IO_CODE();
/* if bs->drv == NULL, bs is closed, so there's nothing to do here */
if (!drv) {
@@ -3579,6 +3641,7 @@ out:
void bdrv_cancel_in_flight(BlockDriverState *bs)
{
+ GLOBAL_STATE_CODE();
if (!bs || !bs->drv) {
return;
}
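
Note: the annotations added to block/io.c above split the block-layer entry points into two categories: global-state functions, which are expected to run in the main loop thread with the BQL held (GLOBAL_STATE_CODE()), and I/O functions, which may be called from any AioContext (IO_CODE()). Below is a minimal, standalone sketch of that idea; it does not reproduce QEMU's real macro definitions, and the pthread-based main-thread check and the no-op IO_CODE() are assumptions made purely for illustration.

    /*
     * Standalone sketch of the code-category assertions used above.
     * These are NOT QEMU's real macro definitions: here the "global
     * state" check is a plain pthread main-thread test and IO_CODE()
     * is a documentation-only no-op.
     */
    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_t main_thread;

    /* Must run in the main loop thread (BQL held in real QEMU). */
    #define GLOBAL_STATE_CODE() \
        assert(pthread_equal(pthread_self(), main_thread))

    /* May run in any thread / AioContext; marker only. */
    #define IO_CODE() ((void)0)

    static int graph_modifying_op(void)
    {
        GLOBAL_STATE_CODE();
        return 0;
    }

    static int io_path_op(void)
    {
        IO_CODE();
        return 0;
    }

    int main(void)
    {
        main_thread = pthread_self();
        graph_modifying_op();
        io_path_op();
        printf("category assertions passed\n");
        return 0;
    }

Built with an ordinary C compiler and run as-is, both calls succeed because they happen on the registered main thread.
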
diff --git a/block/meson.build b/block/meson.build
index 8a1ce58c9c..e42bcb58d5 100644
--- a/block/meson.build
+++ b/block/meson.build
@@ -131,8 +131,11 @@ block_ss.add(module_block_h)
wrapper_py = find_program('../scripts/block-coroutine-wrapper.py')
block_gen_c = custom_target('block-gen.c',
output: 'block-gen.c',
- input: files('../include/block/block.h',
- 'coroutines.h'),
+ input: files(
+ '../include/block/block-io.h',
+ '../include/block/block-global-state.h',
+ 'coroutines.h'
+ ),
command: [wrapper_py, '@OUTPUT@', '@INPUT@'])
block_ss.add(block_gen_c)
diff --git a/block/mirror.c b/block/mirror.c
index 69b2c1c697..ce6bc58d1f 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -1864,6 +1864,8 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
bool is_none_mode;
BlockDriverState *base;
+ GLOBAL_STATE_CODE();
+
if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
(mode == MIRROR_SYNC_MODE_BITMAP)) {
error_setg(errp, "Sync mode '%s' not supported",
@@ -1889,6 +1891,8 @@ BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
bool base_read_only;
BlockJob *job;
+ GLOBAL_STATE_CODE();
+
base_read_only = bdrv_is_read_only(base);
if (base_read_only) {
diff --git a/block/monitor/bitmap-qmp-cmds.c b/block/monitor/bitmap-qmp-cmds.c
index 9f11deec64..972e8a0afc 100644
--- a/block/monitor/bitmap-qmp-cmds.c
+++ b/block/monitor/bitmap-qmp-cmds.c
@@ -56,6 +56,8 @@ BdrvDirtyBitmap *block_dirty_bitmap_lookup(const char *node,
BlockDriverState *bs;
BdrvDirtyBitmap *bitmap;
+ GLOBAL_STATE_CODE();
+
if (!node) {
error_setg(errp, "Node cannot be NULL");
return NULL;
@@ -155,6 +157,8 @@ BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name,
BdrvDirtyBitmap *bitmap;
AioContext *aio_context;
+ GLOBAL_STATE_CODE();
+
bitmap = block_dirty_bitmap_lookup(node, name, &bs, errp);
if (!bitmap || !bs) {
return NULL;
@@ -261,6 +265,8 @@ BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target,
BlockDirtyBitmapMergeSourceList *lst;
Error *local_err = NULL;
+ GLOBAL_STATE_CODE();
+
dst = block_dirty_bitmap_lookup(node, target, &bs, errp);
if (!dst) {
return NULL;
diff --git a/block/nbd.c b/block/nbd.c
index 5853d85d60..146d25660e 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -313,6 +313,7 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
int ret;
bool blocking = nbd_client_connecting_wait(s);
+ IO_CODE();
assert(!s->ioc);
diff --git a/block/parallels.c b/block/parallels.c
index 6ebad2a2bb..e58c828422 100644
--- a/block/parallels.c
+++ b/block/parallels.c
@@ -873,7 +873,7 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
s->bat_dirty_bmap =
bitmap_new(DIV_ROUND_UP(s->header_size, s->bat_dirty_block));
- /* Disable migration until bdrv_invalidate_cache method is added */
+ /* Disable migration until bdrv_activate method is added */
error_setg(&s->migration_blocker, "The Parallels format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
diff --git a/block/snapshot.c b/block/snapshot.c
index ccacda8bd5..d6f53c3065 100644
--- a/block/snapshot.c
+++ b/block/snapshot.c
@@ -57,6 +57,8 @@ int bdrv_snapshot_find(BlockDriverState *bs, QEMUSnapshotInfo *sn_info,
QEMUSnapshotInfo *sn_tab, *sn;
int nb_sns, i, ret;
+ GLOBAL_STATE_CODE();
+
ret = -ENOENT;
nb_sns = bdrv_snapshot_list(bs, &sn_tab);
if (nb_sns < 0) {
@@ -105,6 +107,7 @@ bool bdrv_snapshot_find_by_id_and_name(BlockDriverState *bs,
bool ret = false;
assert(id || name);
+ GLOBAL_STATE_CODE();
nb_sns = bdrv_snapshot_list(bs, &sn_tab);
if (nb_sns < 0) {
@@ -200,6 +203,7 @@ static BlockDriverState *bdrv_snapshot_fallback(BlockDriverState *bs)
int bdrv_can_snapshot(BlockDriverState *bs)
{
BlockDriver *drv = bs->drv;
+ GLOBAL_STATE_CODE();
if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
return 0;
}
@@ -220,6 +224,9 @@ int bdrv_snapshot_create(BlockDriverState *bs,
{
BlockDriver *drv = bs->drv;
BlockDriverState *fallback_bs = bdrv_snapshot_fallback(bs);
+
+ GLOBAL_STATE_CODE();
+
if (!drv) {
return -ENOMEDIUM;
}
@@ -240,6 +247,8 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
BdrvChild **fallback_ptr;
int ret, open_ret;
+ GLOBAL_STATE_CODE();
+
if (!drv) {
error_setg(errp, "Block driver is closed");
return -ENOMEDIUM;
@@ -348,6 +357,8 @@ int bdrv_snapshot_delete(BlockDriverState *bs,
BlockDriverState *fallback_bs = bdrv_snapshot_fallback(bs);
int ret;
+ GLOBAL_STATE_CODE();
+
if (!drv) {
error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, bdrv_get_device_name(bs));
return -ENOMEDIUM;
@@ -380,6 +391,8 @@ int bdrv_snapshot_list(BlockDriverState *bs,
{
BlockDriver *drv = bs->drv;
BlockDriverState *fallback_bs = bdrv_snapshot_fallback(bs);
+
+ GLOBAL_STATE_CODE();
if (!drv) {
return -ENOMEDIUM;
}
@@ -419,6 +432,8 @@ int bdrv_snapshot_load_tmp(BlockDriverState *bs,
{
BlockDriver *drv = bs->drv;
+ GLOBAL_STATE_CODE();
+
if (!drv) {
error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, bdrv_get_device_name(bs));
return -ENOMEDIUM;
@@ -447,6 +462,8 @@ int bdrv_snapshot_load_tmp_by_id_or_name(BlockDriverState *bs,
int ret;
Error *local_err = NULL;
+ GLOBAL_STATE_CODE();
+
ret = bdrv_snapshot_load_tmp(bs, id_or_name, NULL, &local_err);
if (ret == -ENOENT || ret == -EINVAL) {
error_free(local_err);
@@ -515,6 +532,8 @@ bool bdrv_all_can_snapshot(bool has_devices, strList *devices,
g_autoptr(GList) bdrvs = NULL;
GList *iterbdrvs;
+ GLOBAL_STATE_CODE();
+
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return false;
}
@@ -549,6 +568,8 @@ int bdrv_all_delete_snapshot(const char *name,
g_autoptr(GList) bdrvs = NULL;
GList *iterbdrvs;
+ GLOBAL_STATE_CODE();
+
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return -1;
}
@@ -588,6 +609,8 @@ int bdrv_all_goto_snapshot(const char *name,
g_autoptr(GList) bdrvs = NULL;
GList *iterbdrvs;
+ GLOBAL_STATE_CODE();
+
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return -1;
}
@@ -622,6 +645,8 @@ int bdrv_all_has_snapshot(const char *name,
g_autoptr(GList) bdrvs = NULL;
GList *iterbdrvs;
+ GLOBAL_STATE_CODE();
+
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return -1;
}
@@ -663,6 +688,7 @@ int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn,
{
g_autoptr(GList) bdrvs = NULL;
GList *iterbdrvs;
+ GLOBAL_STATE_CODE();
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return -1;
@@ -703,6 +729,8 @@ BlockDriverState *bdrv_all_find_vmstate_bs(const char *vmstate_bs,
g_autoptr(GList) bdrvs = NULL;
GList *iterbdrvs;
+ GLOBAL_STATE_CODE();
+
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return NULL;
}
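
Note: all of the snapshot helpers above gained GLOBAL_STATE_CODE(), i.e. they must only be reached from the main loop. As a purely illustrative, standalone sketch of what that rule catches, the program below uses a soft, printing variant of the check; fake_snapshot_goto() and in_main_thread() are made-up names, and the real macro is assertion-based rather than returning an error.

    /*
     * Standalone sketch: what the GLOBAL_STATE_CODE() rule is meant to
     * catch.  fake_snapshot_goto() stands in for a graph-modifying call
     * such as bdrv_snapshot_goto(); the check is a soft, printing
     * variant purely for illustration.
     * Build with: cc -pthread demo.c -o demo
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_t main_thread;

    static bool in_main_thread(void)
    {
        return pthread_equal(pthread_self(), main_thread) != 0;
    }

    static int fake_snapshot_goto(const char *name)
    {
        if (!in_main_thread()) {
            fprintf(stderr, "BUG: %s called outside the main loop\n",
                    __func__);
            return -1;
        }
        printf("switched to snapshot '%s'\n", name);
        return 0;
    }

    static void *worker(void *opaque)
    {
        (void)opaque;
        fake_snapshot_goto("from-worker");   /* violates the rule */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        main_thread = pthread_self();
        fake_snapshot_goto("from-main");     /* allowed */

        pthread_create(&t, NULL, worker, NULL);
        pthread_join(t, NULL);
        return 0;
    }

Run on its own, the call from main() succeeds while the one from the worker thread reports the category violation.
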
diff --git a/block/stream.c b/block/stream.c
index 7c6b173ddd..3acb59fe6a 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -220,6 +220,8 @@ void stream_start(const char *job_id, BlockDriverState *bs,
QDict *opts;
int ret;
+ GLOBAL_STATE_CODE();
+
assert(!(base && bottom));
assert(!(backing_file_str && bottom));