Diffstat (limited to 'block')
-rw-r--r--  block/curl.c       |  25
-rw-r--r--  block/gluster.c    |  23
-rw-r--r--  block/iscsi.c      |  10
-rw-r--r--  block/linux-aio.c  |  18
-rw-r--r--  block/nbd.c        |  18
-rw-r--r--  block/rbd.c        |  16
-rw-r--r--  block/sheepdog.c   |  33
-rw-r--r--  block/ssh.c        |  12
-rw-r--r--  block/stream.c     |   6
9 files changed, 31 insertions(+), 130 deletions(-)
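
Eight of the nine files below make the same mechanical change: the io_flush
callback argument is dropped from the AIO handler-registration calls, and each
driver's now-dead flush callback (curl_aio_flush, qemu_gluster_aio_flush_cb,
and so on) is deleted along with the in-flight counters that backed it
(block/stream.c carries a separate ordering fix, covered at the end). As a
reading aid, here is a sketch of the prototype change the call sites imply;
the typedef and the exact declaration are reconstructions from this diff, not
quotes from QEMU's headers:

/* Sketch only: reconstructed from the call sites in this diff; the real
 * declarations live in QEMU's AIO headers and may differ in detail. */
typedef void IOHandler(void *opaque);

/* Before this patch, per the '-' lines below:
 *
 *   void qemu_aio_set_fd_handler(int fd, IOHandler *io_read,
 *                                IOHandler *io_write,
 *                                AioFlushHandler *io_flush,  // int (*)(void *)
 *                                void *opaque);
 */

/* After this patch, per the '+' lines below: */
void qemu_aio_set_fd_handler(int fd, IOHandler *io_read,
                             IOHandler *io_write, void *opaque);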
diff --git a/block/curl.c b/block/curl.c
index 82d39ff53f..e566855f76 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -86,7 +86,6 @@ typedef struct BDRVCURLState {
static void curl_clean_state(CURLState *s);
static void curl_multi_do(void *arg);
-static int curl_aio_flush(void *opaque);
static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
void *s, void *sp)
@@ -94,17 +93,16 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
DPRINTF("CURL (AIO): Sock action %d on fd %d\n", action, fd);
switch (action) {
case CURL_POLL_IN:
- qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, curl_aio_flush, s);
+ qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, s);
break;
case CURL_POLL_OUT:
- qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, curl_aio_flush, s);
+ qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, s);
break;
case CURL_POLL_INOUT:
- qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do,
- curl_aio_flush, s);
+ qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do, s);
break;
case CURL_POLL_REMOVE:
- qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL);
+ qemu_aio_set_fd_handler(fd, NULL, NULL, NULL);
break;
}
@@ -495,21 +493,6 @@ out_noclean:
return -EINVAL;
}
-static int curl_aio_flush(void *opaque)
-{
- BDRVCURLState *s = opaque;
- int i, j;
-
- for (i=0; i < CURL_NUM_STATES; i++) {
- for(j=0; j < CURL_NUM_ACB; j++) {
- if (s->states[i].acb[j]) {
- return 1;
- }
- }
- }
- return 0;
-}
-
static void curl_aio_cancel(BlockDriverAIOCB *blockacb)
{
// Do we have to implement canceling? Seems to work without...
diff --git a/block/gluster.c b/block/gluster.c
index 645b7f12a5..46f36f8cd6 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -32,7 +32,6 @@ typedef struct BDRVGlusterState {
struct glfs *glfs;
int fds[2];
struct glfs_fd *fd;
- int qemu_aio_count;
int event_reader_pos;
GlusterAIOCB *event_acb;
} BDRVGlusterState;
@@ -247,7 +246,6 @@ static void qemu_gluster_complete_aio(GlusterAIOCB *acb, BDRVGlusterState *s)
ret = -EIO; /* Partial read/write - fail it */
}
- s->qemu_aio_count--;
qemu_aio_release(acb);
cb(opaque, ret);
if (finished) {
@@ -275,13 +273,6 @@ static void qemu_gluster_aio_event_reader(void *opaque)
} while (ret < 0 && errno == EINTR);
}
-static int qemu_gluster_aio_flush_cb(void *opaque)
-{
- BDRVGlusterState *s = opaque;
-
- return (s->qemu_aio_count > 0);
-}
-
/* TODO Convert to fine grained options */
static QemuOptsList runtime_opts = {
.name = "gluster",
@@ -348,7 +339,7 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
}
fcntl(s->fds[GLUSTER_FD_READ], F_SETFL, O_NONBLOCK);
qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ],
- qemu_gluster_aio_event_reader, NULL, qemu_gluster_aio_flush_cb, s);
+ qemu_gluster_aio_event_reader, NULL, s);
out:
qemu_opts_del(opts);
@@ -445,11 +436,9 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
qemu_mutex_lock_iothread(); /* We are in gluster thread context */
acb->common.cb(acb->common.opaque, -EIO);
qemu_aio_release(acb);
- s->qemu_aio_count--;
close(s->fds[GLUSTER_FD_READ]);
close(s->fds[GLUSTER_FD_WRITE]);
- qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL,
- NULL);
+ qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL);
bs->drv = NULL; /* Make the disk inaccessible */
qemu_mutex_unlock_iothread();
}
@@ -467,7 +456,6 @@ static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs,
offset = sector_num * BDRV_SECTOR_SIZE;
size = nb_sectors * BDRV_SECTOR_SIZE;
- s->qemu_aio_count++;
acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
acb->size = size;
@@ -488,7 +476,6 @@ static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs,
return &acb->common;
out:
- s->qemu_aio_count--;
qemu_aio_release(acb);
return NULL;
}
@@ -531,7 +518,6 @@ static BlockDriverAIOCB *qemu_gluster_aio_flush(BlockDriverState *bs,
acb->size = 0;
acb->ret = 0;
acb->finished = NULL;
- s->qemu_aio_count++;
ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
if (ret < 0) {
@@ -540,7 +526,6 @@ static BlockDriverAIOCB *qemu_gluster_aio_flush(BlockDriverState *bs,
return &acb->common;
out:
- s->qemu_aio_count--;
qemu_aio_release(acb);
return NULL;
}
@@ -563,7 +548,6 @@ static BlockDriverAIOCB *qemu_gluster_aio_discard(BlockDriverState *bs,
acb->size = 0;
acb->ret = 0;
acb->finished = NULL;
- s->qemu_aio_count++;
ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
if (ret < 0) {
@@ -572,7 +556,6 @@ static BlockDriverAIOCB *qemu_gluster_aio_discard(BlockDriverState *bs,
return &acb->common;
out:
- s->qemu_aio_count--;
qemu_aio_release(acb);
return NULL;
}
@@ -611,7 +594,7 @@ static void qemu_gluster_close(BlockDriverState *bs)
close(s->fds[GLUSTER_FD_READ]);
close(s->fds[GLUSTER_FD_WRITE]);
- qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL, NULL);
+ qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL);
if (s->fd) {
glfs_close(s->fd);
diff --git a/block/iscsi.c b/block/iscsi.c
index e7c1c2b538..47a3adc9b5 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -146,13 +146,6 @@ static const AIOCBInfo iscsi_aiocb_info = {
static void iscsi_process_read(void *arg);
static void iscsi_process_write(void *arg);
-static int iscsi_process_flush(void *arg)
-{
- IscsiLun *iscsilun = arg;
-
- return iscsi_queue_length(iscsilun->iscsi) > 0;
-}
-
static void
iscsi_set_events(IscsiLun *iscsilun)
{
@@ -166,7 +159,6 @@ iscsi_set_events(IscsiLun *iscsilun)
qemu_aio_set_fd_handler(iscsi_get_fd(iscsi),
iscsi_process_read,
(ev & POLLOUT) ? iscsi_process_write : NULL,
- iscsi_process_flush,
iscsilun);
}
@@ -1215,7 +1207,7 @@ static void iscsi_close(BlockDriverState *bs)
qemu_del_timer(iscsilun->nop_timer);
qemu_free_timer(iscsilun->nop_timer);
}
- qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL, NULL);
+ qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL);
iscsi_destroy_context(iscsi);
memset(iscsilun, 0, sizeof(IscsiLun));
}
diff --git a/block/linux-aio.c b/block/linux-aio.c
index ee0f8d10c9..53434e2df5 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -39,7 +39,6 @@ struct qemu_laiocb {
struct qemu_laio_state {
io_context_t ctx;
EventNotifier e;
- int count;
};
static inline ssize_t io_event_ret(struct io_event *ev)
@@ -55,8 +54,6 @@ static void qemu_laio_process_completion(struct qemu_laio_state *s,
{
int ret;
- s->count--;
-
ret = laiocb->ret;
if (ret != -ECANCELED) {
if (ret == laiocb->nbytes) {
@@ -101,13 +98,6 @@ static void qemu_laio_completion_cb(EventNotifier *e)
}
}
-static int qemu_laio_flush_cb(EventNotifier *e)
-{
- struct qemu_laio_state *s = container_of(e, struct qemu_laio_state, e);
-
- return (s->count > 0) ? 1 : 0;
-}
-
static void laio_cancel(BlockDriverAIOCB *blockacb)
{
struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
@@ -177,14 +167,11 @@ BlockDriverAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
goto out_free_aiocb;
}
io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));
- s->count++;
if (io_submit(s->ctx, 1, &iocbs) < 0)
- goto out_dec_count;
+ goto out_free_aiocb;
return &laiocb->common;
-out_dec_count:
- s->count--;
out_free_aiocb:
qemu_aio_release(laiocb);
return NULL;
@@ -203,8 +190,7 @@ void *laio_init(void)
goto out_close_efd;
}
- qemu_aio_set_event_notifier(&s->e, qemu_laio_completion_cb,
- qemu_laio_flush_cb);
+ qemu_aio_set_event_notifier(&s->e, qemu_laio_completion_cb);
return s;
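
linux-aio registers an event notifier rather than a raw fd, and the notifier
variant of the API loses its flush callback in the same way. Reconstructed
from the call change above (the handler typedef is an assumption, though it
matches qemu_laio_completion_cb's signature visible in the hunk):

/* Sketch only: reconstructed from the call site above. */
typedef struct EventNotifier EventNotifier;
typedef void EventNotifierHandler(EventNotifier *e);

/* Before: qemu_aio_set_event_notifier(&s->e, qemu_laio_completion_cb,
 *                                     qemu_laio_flush_cb);
 * After, per the '+' line above: */
void qemu_aio_set_event_notifier(EventNotifier *notifier,
                                 EventNotifierHandler *io_read);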
diff --git a/block/nbd.c b/block/nbd.c
index 9c480b8f26..691066f726 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -279,13 +279,6 @@ static void nbd_coroutine_start(BDRVNBDState *s, struct nbd_request *request)
request->handle = INDEX_TO_HANDLE(s, i);
}
-static int nbd_have_request(void *opaque)
-{
- BDRVNBDState *s = opaque;
-
- return s->in_flight > 0;
-}
-
static void nbd_reply_ready(void *opaque)
{
BDRVNBDState *s = opaque;
@@ -341,8 +334,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
qemu_co_mutex_lock(&s->send_mutex);
s->send_coroutine = qemu_coroutine_self();
- qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write,
- nbd_have_request, s);
+ qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write, s);
if (qiov) {
if (!s->is_unix) {
socket_set_cork(s->sock, 1);
@@ -361,8 +353,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
} else {
rc = nbd_send_request(s->sock, request);
}
- qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL,
- nbd_have_request, s);
+ qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL, s);
s->send_coroutine = NULL;
qemu_co_mutex_unlock(&s->send_mutex);
return rc;
@@ -438,8 +429,7 @@ static int nbd_establish_connection(BlockDriverState *bs)
/* Now that we're connected, set the socket to be non-blocking and
* kick the reply mechanism. */
qemu_set_nonblock(sock);
- qemu_aio_set_fd_handler(sock, nbd_reply_ready, NULL,
- nbd_have_request, s);
+ qemu_aio_set_fd_handler(sock, nbd_reply_ready, NULL, s);
s->sock = sock;
s->size = size;
@@ -459,7 +449,7 @@ static void nbd_teardown_connection(BlockDriverState *bs)
request.len = 0;
nbd_send_request(s->sock, &request);
- qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL);
+ qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL);
closesocket(s->sock);
}
diff --git a/block/rbd.c b/block/rbd.c
index cb71751218..e798e19f81 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -100,7 +100,6 @@ typedef struct BDRVRBDState {
rados_ioctx_t io_ctx;
rbd_image_t image;
char name[RBD_MAX_IMAGE_NAME_SIZE];
- int qemu_aio_count;
char *snap;
int event_reader_pos;
RADOSCB *event_rcb;
@@ -428,19 +427,11 @@ static void qemu_rbd_aio_event_reader(void *opaque)
if (s->event_reader_pos == sizeof(s->event_rcb)) {
s->event_reader_pos = 0;
qemu_rbd_complete_aio(s->event_rcb);
- s->qemu_aio_count--;
}
}
} while (ret < 0 && errno == EINTR);
}
-static int qemu_rbd_aio_flush_cb(void *opaque)
-{
- BDRVRBDState *s = opaque;
-
- return (s->qemu_aio_count > 0);
-}
-
/* TODO Convert to fine grained options */
static QemuOptsList runtime_opts = {
.name = "rbd",
@@ -554,7 +545,7 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags)
fcntl(s->fds[0], F_SETFL, O_NONBLOCK);
fcntl(s->fds[1], F_SETFL, O_NONBLOCK);
qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader,
- NULL, qemu_rbd_aio_flush_cb, s);
+ NULL, s);
qemu_opts_del(opts);
@@ -578,7 +569,7 @@ static void qemu_rbd_close(BlockDriverState *bs)
close(s->fds[0]);
close(s->fds[1]);
- qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL, NULL);
+ qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL);
rbd_close(s->image);
rados_ioctx_destroy(s->io_ctx);
@@ -741,8 +732,6 @@ static BlockDriverAIOCB *rbd_start_aio(BlockDriverState *bs,
off = sector_num * BDRV_SECTOR_SIZE;
size = nb_sectors * BDRV_SECTOR_SIZE;
- s->qemu_aio_count++; /* All the RADOSCB */
-
rcb = g_malloc(sizeof(RADOSCB));
rcb->done = 0;
rcb->acb = acb;
@@ -779,7 +768,6 @@ static BlockDriverAIOCB *rbd_start_aio(BlockDriverState *bs,
failed:
g_free(rcb);
- s->qemu_aio_count--;
qemu_aio_release(acb);
return NULL;
}
diff --git a/block/sheepdog.c b/block/sheepdog.c
index afe053376c..1ad4d070e7 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -509,13 +509,6 @@ static void restart_co_req(void *opaque)
qemu_coroutine_enter(co, NULL);
}
-static int have_co_req(void *opaque)
-{
- /* this handler is set only when there is a pending request, so
- * always returns 1. */
- return 1;
-}
-
typedef struct SheepdogReqCo {
int sockfd;
SheepdogReq *hdr;
@@ -538,14 +531,14 @@ static coroutine_fn void do_co_req(void *opaque)
unsigned int *rlen = srco->rlen;
co = qemu_coroutine_self();
- qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, have_co_req, co);
+ qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, co);
ret = send_co_req(sockfd, hdr, data, wlen);
if (ret < 0) {
goto out;
}
- qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, have_co_req, co);
+ qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, co);
ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
if (ret < sizeof(*hdr)) {
@@ -570,7 +563,7 @@ static coroutine_fn void do_co_req(void *opaque)
out:
/* there is at most one request for this sockfd, so it is safe to
* set each handler to NULL. */
- qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL, NULL);
+ qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL);
srco->ret = ret;
srco->finished = true;
@@ -796,14 +789,6 @@ static void co_write_request(void *opaque)
qemu_coroutine_enter(s->co_send, NULL);
}
-static int aio_flush_request(void *opaque)
-{
- BDRVSheepdogState *s = opaque;
-
- return !QLIST_EMPTY(&s->inflight_aio_head) ||
- !QLIST_EMPTY(&s->pending_aio_head);
-}
-
/*
* Return a socket discriptor to read/write objects.
*
@@ -819,7 +804,7 @@ static int get_sheep_fd(BDRVSheepdogState *s)
return fd;
}
- qemu_aio_set_fd_handler(fd, co_read_response, NULL, aio_flush_request, s);
+ qemu_aio_set_fd_handler(fd, co_read_response, NULL, s);
return fd;
}
@@ -1069,8 +1054,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
qemu_co_mutex_lock(&s->lock);
s->co_send = qemu_coroutine_self();
- qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request,
- aio_flush_request, s);
+ qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request, s);
socket_set_cork(s->fd, 1);
/* send a header */
@@ -1091,8 +1075,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
}
socket_set_cork(s->fd, 0);
- qemu_aio_set_fd_handler(s->fd, co_read_response, NULL,
- aio_flush_request, s);
+ qemu_aio_set_fd_handler(s->fd, co_read_response, NULL, s);
qemu_co_mutex_unlock(&s->lock);
return 0;
@@ -1350,7 +1333,7 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags)
g_free(buf);
return 0;
out:
- qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
+ qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL);
if (s->fd >= 0) {
closesocket(s->fd);
}
@@ -1578,7 +1561,7 @@ static void sd_close(BlockDriverState *bs)
error_report("%s, %s", sd_strerror(rsp->result), s->name);
}
- qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
+ qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL);
closesocket(s->fd);
g_free(s->host_spec);
}
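
Read without the +/- noise, do_co_req() above follows a simple coroutine
wake-up pattern: the coroutine stores itself as the fd handler's opaque
pointer, yields inside the send/recv helpers, and restart_co_req() re-enters
it when the socket becomes ready. A condensed paraphrase of the post-patch
flow; do_co_req_sketch and the elisions are mine, the names inside are the
ones visible in the hunks:

/* Condensed paraphrase of do_co_req() after this patch; return-value
 * checks, error paths, and the reply-body read are elided. */
static coroutine_fn void do_co_req_sketch(int sockfd, SheepdogReq *hdr,
                                          void *data, unsigned int wlen)
{
    Coroutine *co = qemu_coroutine_self();

    /* Wait for writability, then send the request. */
    qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, co);
    send_co_req(sockfd, hdr, data, wlen);

    /* Flip to readability for the reply header. */
    qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, co);
    qemu_co_recv(sockfd, hdr, sizeof(*hdr));

    /* At most one request uses this sockfd (see the comment in the hunk
     * above), so clearing both handlers unconditionally is safe. */
    qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL);
}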
diff --git a/block/ssh.c b/block/ssh.c
index d7e7bf8dd2..27691b4ad5 100644
--- a/block/ssh.c
+++ b/block/ssh.c
@@ -740,14 +740,6 @@ static void restart_coroutine(void *opaque)
qemu_coroutine_enter(co, NULL);
}
-/* Always true because when we have called set_fd_handler there is
- * always a request being processed.
- */
-static int return_true(void *opaque)
-{
- return 1;
-}
-
static coroutine_fn void set_fd_handler(BDRVSSHState *s)
{
int r;
@@ -766,13 +758,13 @@ static coroutine_fn void set_fd_handler(BDRVSSHState *s)
DPRINTF("s->sock=%d rd_handler=%p wr_handler=%p", s->sock,
rd_handler, wr_handler);
- qemu_aio_set_fd_handler(s->sock, rd_handler, wr_handler, return_true, co);
+ qemu_aio_set_fd_handler(s->sock, rd_handler, wr_handler, co);
}
static coroutine_fn void clear_fd_handler(BDRVSSHState *s)
{
DPRINTF("s->sock=%d", s->sock);
- qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL);
+ qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL);
}
/* A non-blocking call returned EAGAIN, so yield, ensuring the
diff --git a/block/stream.c b/block/stream.c
index 7fe9e486bf..db49b4d85f 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -57,6 +57,11 @@ static void close_unused_images(BlockDriverState *top, BlockDriverState *base,
BlockDriverState *intermediate;
intermediate = top->backing_hd;
+ /* Must assign before bdrv_delete() to prevent traversing dangling pointer
+ * while we delete backing image instances.
+ */
+ top->backing_hd = base;
+
while (intermediate) {
BlockDriverState *unused;
@@ -70,7 +75,6 @@ static void close_unused_images(BlockDriverState *top, BlockDriverState *base,
unused->backing_hd = NULL;
bdrv_delete(unused);
}
- top->backing_hd = base;
}
static void coroutine_fn stream_run(void *opaque)
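
The stream.c hunk differs in kind from the rest: it is an ordering fix, not
an API migration. top->backing_hd must be repointed at base before the
deletion loop runs, since bdrv_delete() frees nodes that would otherwise
still be reachable through top's backing chain. A condensed sketch of the
resulting function; the real one takes further arguments and guards the walk
differently, so treat this as illustration only:

/* Condensed sketch of close_unused_images() after this patch. */
static void close_unused_images(BlockDriverState *top,
                                BlockDriverState *base)
{
    BlockDriverState *intermediate = top->backing_hd;

    /* Repoint top first: after this, no traversal starting from top
     * can reach the nodes the loop below is about to free. */
    top->backing_hd = base;

    while (intermediate && intermediate != base) {
        BlockDriverState *unused = intermediate;
        intermediate = intermediate->backing_hd;
        unused->backing_hd = NULL;   /* detach before deleting */
        bdrv_delete(unused);
    }
}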