author     Kevin Wolf <kwolf@redhat.com>    2011-07-15 16:36:40 +0200
committer  Kevin Wolf <kwolf@redhat.com>    2011-08-02 15:53:41 +0200
commit     384acbf46b70edf0d2c1648aa1a92a90bcf7057d (patch)
tree       38459f728adf03df6b83a6273484f61d05e0dd01
parent     52b8eb60132b27ad53476490e9d7579003390cfa (diff)
async: Remove AsyncContext
The purpose of AsyncContexts was to protect qcow and qcow2 against reentrancy
during an emulated bdrv_read/write (which includes a qemu_aio_wait() call and
can run AIO callbacks of different requests if it weren't for AsyncContexts).

Now both qcow and qcow2 are protected by CoMutexes and AsyncContexts can be
removed.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
-rw-r--r--  async.c             98
-rw-r--r--  block.c              6
-rw-r--r--  block/qed-table.c   14
-rw-r--r--  block/qed.c          4
-rw-r--r--  linux-aio.c         43
-rw-r--r--  posix-aio-compat.c  11
-rw-r--r--  qemu-common.h        4
7 files changed, 11 insertions, 169 deletions
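
For context, the CoMutex scheme that now protects qcow and qcow2 works roughly
as sketched below. This is a simplified illustration of the qemu-coroutine API,
not code from this patch; BDRVExampleState and example_co_writev are made-up
names.

    #include "qemu-common.h"
    #include "qemu-coroutine.h"
    #include "block_int.h"

    typedef struct BDRVExampleState {
        CoMutex lock;   /* serializes all metadata access for this image */
    } BDRVExampleState;

    static int coroutine_fn example_co_writev(BlockDriverState *bs,
                                              int64_t sector_num,
                                              int nb_sectors,
                                              QEMUIOVector *qiov)
    {
        BDRVExampleState *s = bs->opaque;
        int ret;

        qemu_co_mutex_lock(&s->lock);
        /* Look up and update metadata here. If this coroutine yields while
         * waiting for I/O, other requests may run, but any of them that
         * touches the same metadata blocks on s->lock first, so no
         * per-request AsyncContext is needed to avoid corruption. */
        ret = 0;
        qemu_co_mutex_unlock(&s->lock);
        return ret;
    }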
diff --git a/async.c b/async.c
index fd313dffb7..3fe70b9deb 100644
--- a/async.c
+++ b/async.c
@@ -25,92 +25,8 @@
#include "qemu-common.h"
#include "qemu-aio.h"
-/*
- * An AsyncContext protects the callbacks of AIO requests and Bottom Halves
- * against interfering with each other. A typical example is qcow2 that accepts
- * asynchronous requests, but relies for manipulation of its metadata on
- * synchronous bdrv_read/write that doesn't trigger any callbacks.
- *
- * However, these functions are often emulated using AIO which means that AIO
- * callbacks must be run - but at the same time we must not run callbacks of
- * other requests as they might start to modify metadata and corrupt the
- * internal state of the caller of bdrv_read/write.
- *
- * To achieve the desired semantics we switch into a new AsyncContext.
- * Callbacks must only be run if they belong to the current AsyncContext.
- * Otherwise they need to be queued until their own context is active again.
- * This is how you can make qemu_aio_wait() wait only for your own callbacks.
- *
- * The AsyncContexts form a stack. When you leave an AsyncContext, you always
- * return to the old ("parent") context.
- */
-struct AsyncContext {
- /* Consecutive number of the AsyncContext (position in the stack) */
- int id;
-
- /* Anchor of the list of Bottom Halves belonging to the context */
- struct QEMUBH *first_bh;
-
- /* Link to parent context */
- struct AsyncContext *parent;
-};
-
-/* The currently active AsyncContext */
-static struct AsyncContext *async_context = &(struct AsyncContext) { 0 };
-
-/*
- * Enter a new AsyncContext. Already scheduled Bottom Halves and AIO callbacks
- * won't be called until this context is left again.
- */
-void async_context_push(void)
-{
- struct AsyncContext *new = qemu_mallocz(sizeof(*new));
- new->parent = async_context;
- new->id = async_context->id + 1;
- async_context = new;
-}
-
-/* Run queued AIO completions and destroy Bottom Half */
-static void bh_run_aio_completions(void *opaque)
-{
- QEMUBH **bh = opaque;
- qemu_bh_delete(*bh);
- qemu_free(bh);
- qemu_aio_process_queue();
-}
-/*
- * Leave the currently active AsyncContext. All Bottom Halves belonging to the
- * old context are executed before changing the context.
- */
-void async_context_pop(void)
-{
- struct AsyncContext *old = async_context;
- QEMUBH **bh;
-
- /* Flush the bottom halves, we don't want to lose them */
- while (qemu_bh_poll());
-
- /* Switch back to the parent context */
- async_context = async_context->parent;
- qemu_free(old);
-
- if (async_context == NULL) {
- abort();
- }
-
- /* Schedule BH to run any queued AIO completions as soon as possible */
- bh = qemu_malloc(sizeof(*bh));
- *bh = qemu_bh_new(bh_run_aio_completions, bh);
- qemu_bh_schedule(*bh);
-}
-
-/*
- * Returns the ID of the currently active AsyncContext
- */
-int get_async_context_id(void)
-{
- return async_context->id;
-}
+/* Anchor of the list of Bottom Halves belonging to the context */
+static struct QEMUBH *first_bh;
/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */
@@ -130,8 +46,8 @@ QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
bh = qemu_mallocz(sizeof(QEMUBH));
bh->cb = cb;
bh->opaque = opaque;
- bh->next = async_context->first_bh;
- async_context->first_bh = bh;
+ bh->next = first_bh;
+ first_bh = bh;
return bh;
}
@@ -141,7 +57,7 @@ int qemu_bh_poll(void)
int ret;
ret = 0;
- for (bh = async_context->first_bh; bh; bh = next) {
+ for (bh = first_bh; bh; bh = next) {
next = bh->next;
if (!bh->deleted && bh->scheduled) {
bh->scheduled = 0;
@@ -153,7 +69,7 @@ int qemu_bh_poll(void)
}
/* remove deleted bhs */
- bhp = &async_context->first_bh;
+ bhp = &first_bh;
while (*bhp) {
bh = *bhp;
if (bh->deleted) {
@@ -199,7 +115,7 @@ void qemu_bh_update_timeout(int *timeout)
{
QEMUBH *bh;
- for (bh = async_context->first_bh; bh; bh = bh->next) {
+ for (bh = first_bh; bh; bh = bh->next) {
if (!bh->deleted && bh->scheduled) {
if (bh->idle) {
/* idle bottom halves will be polled at least
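
With the context stack gone, async.c keeps a single global list of bottom
halves anchored at first_bh. The following self-contained model shows the
resulting poll loop, including the deferred-deletion trick visible in the
hunks above; it is illustrative only (the real QEMUBH also carries an idle
flag and lives in async.c):

    #include <stdlib.h>

    typedef void QEMUBHFunc(void *opaque);

    typedef struct QEMUBH {
        QEMUBHFunc *cb;
        void *opaque;
        struct QEMUBH *next;
        int scheduled;
        int deleted;
    } QEMUBH;

    static QEMUBH *first_bh;    /* single global anchor, as in this patch */

    static int bh_poll_model(void)
    {
        QEMUBH *bh, **bhp, *next;
        int ret = 0;

        /* Run each scheduled handler once; fetch next before the call
         * because the callback may delete bh. */
        for (bh = first_bh; bh; bh = next) {
            next = bh->next;
            if (!bh->deleted && bh->scheduled) {
                bh->scheduled = 0;
                ret = 1;
                bh->cb(bh->opaque);
            }
        }

        /* Deletion is deferred: unlink and free the marked entries only
         * after the walk, so the callbacks above saw a stable list. */
        bhp = &first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        return ret;
    }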
diff --git a/block.c b/block.c
index e6abea85df..0d05b4b32d 100644
--- a/block.c
+++ b/block.c
@@ -2777,8 +2777,6 @@ static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
struct iovec iov;
QEMUIOVector qiov;
- async_context_push();
-
async_ret = NOT_DONE;
iov.iov_base = (void *)buf;
iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
@@ -2796,7 +2794,6 @@ static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
fail:
- async_context_pop();
return async_ret;
}
@@ -2808,8 +2805,6 @@ static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
struct iovec iov;
QEMUIOVector qiov;
- async_context_push();
-
async_ret = NOT_DONE;
iov.iov_base = (void *)buf;
iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
@@ -2825,7 +2820,6 @@ static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
}
fail:
- async_context_pop();
return async_ret;
}
diff --git a/block/qed-table.c b/block/qed-table.c
index d38c673547..d96afa81d7 100644
--- a/block/qed-table.c
+++ b/block/qed-table.c
@@ -179,16 +179,12 @@ int qed_read_l1_table_sync(BDRVQEDState *s)
{
int ret = -EINPROGRESS;
- async_context_push();
-
qed_read_table(s, s->header.l1_table_offset,
s->l1_table, qed_sync_cb, &ret);
while (ret == -EINPROGRESS) {
qemu_aio_wait();
}
- async_context_pop();
-
return ret;
}
@@ -205,15 +201,11 @@ int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
{
int ret = -EINPROGRESS;
- async_context_push();
-
qed_write_l1_table(s, index, n, qed_sync_cb, &ret);
while (ret == -EINPROGRESS) {
qemu_aio_wait();
}
- async_context_pop();
-
return ret;
}
@@ -282,14 +274,11 @@ int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset
{
int ret = -EINPROGRESS;
- async_context_push();
-
qed_read_l2_table(s, request, offset, qed_sync_cb, &ret);
while (ret == -EINPROGRESS) {
qemu_aio_wait();
}
- async_context_pop();
return ret;
}
@@ -307,13 +296,10 @@ int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
{
int ret = -EINPROGRESS;
- async_context_push();
-
qed_write_l2_table(s, request, index, n, flush, qed_sync_cb, &ret);
while (ret == -EINPROGRESS) {
qemu_aio_wait();
}
- async_context_pop();
return ret;
}
diff --git a/block/qed.c b/block/qed.c
index 39703793e9..333f067582 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -680,16 +680,12 @@ static int bdrv_qed_is_allocated(BlockDriverState *bs, int64_t sector_num,
};
QEDRequest request = { .l2_table = NULL };
- async_context_push();
-
qed_find_cluster(s, &request, pos, len, qed_is_allocated_cb, &cb);
while (cb.is_allocated == -1) {
qemu_aio_wait();
}
- async_context_pop();
-
qed_unref_l2_cache_entry(request.l2_table);
return cb.is_allocated;
diff --git a/linux-aio.c b/linux-aio.c
index 68f4b3d757..dc3faf2499 100644
--- a/linux-aio.c
+++ b/linux-aio.c
@@ -31,7 +31,6 @@ struct qemu_laiocb {
struct iocb iocb;
ssize_t ret;
size_t nbytes;
- int async_context_id;
QLIST_ENTRY(qemu_laiocb) node;
};
@@ -39,7 +38,6 @@ struct qemu_laio_state {
io_context_t ctx;
int efd;
int count;
- QLIST_HEAD(, qemu_laiocb) completed_reqs;
};
static inline ssize_t io_event_ret(struct io_event *ev)
@@ -49,7 +47,6 @@ static inline ssize_t io_event_ret(struct io_event *ev)
/*
* Completes an AIO request (calls the callback and frees the ACB).
- * Be sure to be in the right AsyncContext before calling this function.
*/
static void qemu_laio_process_completion(struct qemu_laio_state *s,
struct qemu_laiocb *laiocb)
@@ -72,42 +69,12 @@ static void qemu_laio_process_completion(struct qemu_laio_state *s,
}
/*
- * Processes all queued AIO requests, i.e. requests that have returned from
- * the OS but whose callback was not called yet. Requests whose callback
- * cannot be called in the current AsyncContext remain in the queue.
- *
- * Returns 1 if at least one request could be completed, 0 otherwise.
+ * All requests are directly processed when they complete, so there's nothing
+ * left to do during qemu_aio_wait().
*/
static int qemu_laio_process_requests(void *opaque)
{
- struct qemu_laio_state *s = opaque;
- struct qemu_laiocb *laiocb, *next;
- int res = 0;
-
- QLIST_FOREACH_SAFE (laiocb, &s->completed_reqs, node, next) {
- if (laiocb->async_context_id == get_async_context_id()) {
- qemu_laio_process_completion(s, laiocb);
- QLIST_REMOVE(laiocb, node);
- res = 1;
- }
- }
-
- return res;
-}
-
-/*
- * Puts a request in the completion queue so that its callback is called the
- * next time when it's possible. If we already are in the right AsyncContext,
- * the request is completed immediately instead.
- */
-static void qemu_laio_enqueue_completed(struct qemu_laio_state *s,
- struct qemu_laiocb* laiocb)
-{
- if (laiocb->async_context_id == get_async_context_id()) {
- qemu_laio_process_completion(s, laiocb);
- } else {
- QLIST_INSERT_HEAD(&s->completed_reqs, laiocb, node);
- }
+ return 0;
}
static void qemu_laio_completion_cb(void *opaque)
@@ -141,7 +108,7 @@ static void qemu_laio_completion_cb(void *opaque)
container_of(iocb, struct qemu_laiocb, iocb);
laiocb->ret = io_event_ret(&events[i]);
- qemu_laio_enqueue_completed(s, laiocb);
+ qemu_laio_process_completion(s, laiocb);
}
}
}
@@ -204,7 +171,6 @@ BlockDriverAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
laiocb->nbytes = nb_sectors * 512;
laiocb->ctx = s;
laiocb->ret = -EINPROGRESS;
- laiocb->async_context_id = get_async_context_id();
iocbs = &laiocb->iocb;
@@ -239,7 +205,6 @@ void *laio_init(void)
struct qemu_laio_state *s;
s = qemu_mallocz(sizeof(*s));
- QLIST_INIT(&s->completed_reqs);
s->efd = eventfd(0, 0);
if (s->efd == -1)
goto out_free_state;
diff --git a/posix-aio-compat.c b/posix-aio-compat.c
index c4116e30f2..788d113860 100644
--- a/posix-aio-compat.c
+++ b/posix-aio-compat.c
@@ -49,8 +49,6 @@ struct qemu_paiocb {
ssize_t ret;
int active;
struct qemu_paiocb *next;
-
- int async_context_id;
};
typedef struct PosixAioState {
@@ -420,7 +418,6 @@ static int posix_aio_process_queue(void *opaque)
struct qemu_paiocb *acb, **pacb;
int ret;
int result = 0;
- int async_context_id = get_async_context_id();
for(;;) {
pacb = &s->first_aio;
@@ -429,12 +426,6 @@ static int posix_aio_process_queue(void *opaque)
if (!acb)
return result;
- /* we're only interested in requests in the right context */
- if (acb->async_context_id != async_context_id) {
- pacb = &acb->next;
- continue;
- }
-
ret = qemu_paio_error(acb);
if (ret == ECANCELED) {
/* remove the request */
@@ -575,7 +566,6 @@ BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd,
acb->aio_type = type;
acb->aio_fildes = fd;
acb->ev_signo = SIGUSR2;
- acb->async_context_id = get_async_context_id();
if (qiov) {
acb->aio_iov = qiov->iov;
@@ -604,7 +594,6 @@ BlockDriverAIOCB *paio_ioctl(BlockDriverState *bs, int fd,
acb->aio_type = QEMU_AIO_IOCTL;
acb->aio_fildes = fd;
acb->ev_signo = SIGUSR2;
- acb->async_context_id = get_async_context_id();
acb->aio_offset = 0;
acb->aio_ioctl_buf = buf;
acb->aio_ioctl_cmd = req;
diff --git a/qemu-common.h b/qemu-common.h
index 1e3c66511e..8f21a8cb29 100644
--- a/qemu-common.h
+++ b/qemu-common.h
@@ -115,10 +115,6 @@ int qemu_main(int argc, char **argv, char **envp);
/* bottom halves */
typedef void QEMUBHFunc(void *opaque);
-void async_context_push(void);
-void async_context_pop(void);
-int get_async_context_id(void);
-
QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
void qemu_bh_schedule(QEMUBH *bh);
/* Bottom halves that are scheduled from a bottom half handler are instantly
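
Finally, note that the removed bh_run_aio_completions() in async.c was an
instance of the one-shot, self-deleting bottom half pattern, which the
remaining API still supports. A sketch using the qemu_bh_* and qemu_malloc
calls from this tree (one_shot_cb and schedule_one_shot are placeholder
names):

    static void one_shot_cb(void *opaque)
    {
        QEMUBH **bh = opaque;

        qemu_bh_delete(*bh);    /* mark deleted; freed on the next poll */
        qemu_free(bh);
        /* ... perform the deferred work here ... */
    }

    static void schedule_one_shot(void)
    {
        QEMUBH **bh = qemu_malloc(sizeof(*bh));

        *bh = qemu_bh_new(one_shot_cb, bh);
        qemu_bh_schedule(*bh);
    }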