author    MORITA Kazutaka <morita.kazutaka@lab.ntt.co.jp>  2013-10-24 16:01:17 +0900
committer Kevin Wolf <kwolf@redhat.com>  2013-10-30 12:22:20 +0100
commit    35200687a1e04a79b0345be476185dc23d1604fb (patch)
tree      289b6a2df412e5df807c2daea0ea84f1c1a28215 /block
parent    a37dcdf9aea8e19fcec6b1c5aa2c27c325fc4644 (diff)
sheepdog: cancel aio requests if possible
This patch tries to cancel aio requests in the pending queue and the failed queue. When the sheepdog driver cannot cancel the requests, it waits for them to be completed.

Signed-off-by: MORITA Kazutaka <morita.kazutaka@lab.ntt.co.jp>
Tested-by: Liu Yuan <namei.unix@gmail.com>
Reviewed-by: Liu Yuan <namei.unix@gmail.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
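For context, here is a minimal, self-contained sketch of the cancel-or-wait pattern the commit message describes: requests still queued locally can simply be dropped, while requests already sent to the server must be waited for before the acb can complete. All fake_* names are hypothetical stand-ins (fake_wait_for_completion() plays the role of qemu_aio_wait()); the actual driver code is in the diff below.

#include <stdbool.h>
#include <stdio.h>

/* Simplified model of one acb: some requests are still queued locally,
 * others are already in flight to the (hypothetical) server. */
struct fake_acb {
    int queued;     /* requests still in local pending/failed queues */
    int inflight;   /* requests already sent to the server */
};

/* Stand-in for qemu_aio_wait(): pretend one in-flight request completes. */
static void fake_wait_for_completion(struct fake_acb *acb)
{
    if (acb->inflight > 0) {
        acb->inflight--;
    }
}

/* The acb is cancelable only once nothing is in flight. */
static bool fake_cancelable(const struct fake_acb *acb)
{
    return acb->inflight == 0;
}

static void fake_cancel(struct fake_acb *acb)
{
    for (;;) {
        if (fake_cancelable(acb)) {
            /* Drop locally queued requests and finish the acb. */
            acb->queued = 0;
            printf("canceled (all queued requests dropped)\n");
            return;
        }
        /* Cannot cancel yet: wait for in-flight requests to complete. */
        fake_wait_for_completion(acb);
    }
}

int main(void)
{
    struct fake_acb acb = { .queued = 2, .inflight = 3 };
    fake_cancel(&acb);
    return 0;
}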
Diffstat (limited to 'block')
-rw-r--r--  block/sheepdog.c  70
1 file changed, 59 insertions, 11 deletions
diff --git a/block/sheepdog.c b/block/sheepdog.c
index 8790494126..eebb5feb31 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -299,7 +299,8 @@ struct SheepdogAIOCB {
     Coroutine *coroutine;
     void (*aio_done_func)(SheepdogAIOCB *);
-    bool canceled;
+    bool cancelable;
+    bool *finished;
     int nr_pending;
 };
@@ -418,6 +419,7 @@ static inline void free_aio_req(BDRVSheepdogState *s, AIOReq *aio_req)
 {
     SheepdogAIOCB *acb = aio_req->aiocb;
+    acb->cancelable = false;
     QLIST_REMOVE(aio_req, aio_siblings);
     g_free(aio_req);
@@ -426,23 +428,68 @@ static inline void free_aio_req(BDRVSheepdogState *s, AIOReq *aio_req)
 static void coroutine_fn sd_finish_aiocb(SheepdogAIOCB *acb)
 {
-    if (!acb->canceled) {
-        qemu_coroutine_enter(acb->coroutine, NULL);
+    qemu_coroutine_enter(acb->coroutine, NULL);
+    if (acb->finished) {
+        *acb->finished = true;
     }
     qemu_aio_release(acb);
 }
+/*
+ * Check whether the specified acb can be canceled
+ *
+ * We can cancel aio when any request belonging to the acb is:
+ * - Not processed by the sheepdog server.
+ * - Not linked to the inflight queue.
+ */
+static bool sd_acb_cancelable(const SheepdogAIOCB *acb)
+{
+    BDRVSheepdogState *s = acb->common.bs->opaque;
+    AIOReq *aioreq;
+
+    if (!acb->cancelable) {
+        return false;
+    }
+
+    QLIST_FOREACH(aioreq, &s->inflight_aio_head, aio_siblings) {
+        if (aioreq->aiocb == acb) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
 static void sd_aio_cancel(BlockDriverAIOCB *blockacb)
 {
     SheepdogAIOCB *acb = (SheepdogAIOCB *)blockacb;
+    BDRVSheepdogState *s = acb->common.bs->opaque;
+    AIOReq *aioreq, *next;
+    bool finished = false;
+
+    acb->finished = &finished;
+    while (!finished) {
+        if (sd_acb_cancelable(acb)) {
+            /* Remove outstanding requests from pending and failed queues. */
+            QLIST_FOREACH_SAFE(aioreq, &s->pending_aio_head, aio_siblings,
+                               next) {
+                if (aioreq->aiocb == acb) {
+                    free_aio_req(s, aioreq);
+                }
+            }
+            QLIST_FOREACH_SAFE(aioreq, &s->failed_aio_head, aio_siblings,
+                               next) {
+                if (aioreq->aiocb == acb) {
+                    free_aio_req(s, aioreq);
+                }
+            }
-    /*
-     * Sheepdog cannot cancel the requests which are already sent to
-     * the servers, so we just complete the request with -EIO here.
-     */
-    acb->ret = -EIO;
-    qemu_coroutine_enter(acb->coroutine, NULL);
-    acb->canceled = true;
+            assert(acb->nr_pending == 0);
+            sd_finish_aiocb(acb);
+            return;
+        }
+        qemu_aio_wait();
+    }
 }
static const AIOCBInfo sd_aiocb_info = {
@@ -463,7 +510,8 @@ static SheepdogAIOCB *sd_aio_setup(BlockDriverState *bs, QEMUIOVector *qiov,
     acb->nb_sectors = nb_sectors;
     acb->aio_done_func = NULL;
-    acb->canceled = false;
+    acb->cancelable = true;
+    acb->finished = NULL;
     acb->coroutine = qemu_coroutine_self();
     acb->ret = 0;
     acb->nr_pending = 0;