author      Paolo Bonzini <pbonzini@redhat.com>      2012-04-12 14:00:54 +0200
committer   Kevin Wolf <kwolf@redhat.com>            2012-04-19 16:37:53 +0200
commit      bafbd6a1c69fef73500309dc31c86984c6d22b43
tree        6572fe65223273c25466929eb9495da066652343
parent      adfe92f6d18c0e0a3694e19abb58eb55fd0c5993
aio: remove process_queue callback and qemu_aio_process_queue
Both are unused after the previous patch.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
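
For orientation before the diff: after this patch, qemu_aio_set_fd_handler() takes four callbacks plus an opaque pointer; the io_process_queue slot is gone. A minimal sketch of a caller under the new signature (the handler names here are hypothetical, invented for the example; only the declaration itself comes from the patch):

/* Hypothetical caller, illustrating the post-patch signature. */
static void my_read_cb(void *opaque)
{
    /* consume completed requests for this fd */
}

static int my_flush_cb(void *opaque)
{
    /* per qemu-aio.h: return 1 while AIO requests are outstanding,
     * 0 when idle */
    return 0;
}

static void register_my_fd(int fd, void *state)
{
    /* the former fifth argument, AioProcessQueue *io_process_queue,
     * is dropped; callers that passed NULL simply remove it */
    qemu_aio_set_fd_handler(fd, my_read_cb, NULL, my_flush_cb, state);
}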
-rw-r--r--  aio.c               29
-rw-r--r--  block/curl.c        10
-rw-r--r--  block/iscsi.c        4
-rw-r--r--  block/nbd.c          8
-rw-r--r--  block/rbd.c          5
-rw-r--r--  block/sheepdog.c    11
-rw-r--r--  linux-aio.c          2
-rw-r--r--  posix-aio-compat.c   3
-rw-r--r--  qemu-aio.h          13
9 files changed, 21 insertions(+), 64 deletions(-)
diff --git a/aio.c b/aio.c
--- a/aio.c
+++ b/aio.c
@@ -35,7 +35,6 @@ struct AioHandler
     IOHandler *io_read;
     IOHandler *io_write;
     AioFlushHandler *io_flush;
-    AioProcessQueue *io_process_queue;
     int deleted;
     void *opaque;
     QLIST_ENTRY(AioHandler) node;
@@ -58,7 +57,6 @@ int qemu_aio_set_fd_handler(int fd,
                             IOHandler *io_read,
                             IOHandler *io_write,
                             AioFlushHandler *io_flush,
-                            AioProcessQueue *io_process_queue,
                             void *opaque)
 {
     AioHandler *node;
@@ -91,7 +89,6 @@ int qemu_aio_set_fd_handler(int fd,
         node->io_read = io_read;
         node->io_write = io_write;
         node->io_flush = io_flush;
-        node->io_process_queue = io_process_queue;
         node->opaque = opaque;
     }
 
@@ -122,39 +119,17 @@ void qemu_aio_flush(void)
     } while (qemu_bh_poll() || ret > 0);
 }
 
-int qemu_aio_process_queue(void)
-{
-    AioHandler *node;
-    int ret = 0;
-
-    walking_handlers = 1;
-
-    QLIST_FOREACH(node, &aio_handlers, node) {
-        if (node->io_process_queue) {
-            if (node->io_process_queue(node->opaque)) {
-                ret = 1;
-            }
-        }
-    }
-
-    walking_handlers = 0;
-
-    return ret;
-}
-
 void qemu_aio_wait(void)
 {
     int ret;
 
-    if (qemu_bh_poll())
-        return;
-
     /*
      * If there are callbacks left that have been queued, we need to call then.
      * Return afterwards to avoid waiting needlessly in select().
      */
-    if (qemu_aio_process_queue())
+    if (qemu_bh_poll()) {
         return;
+    }
 
     do {
         AioHandler *node;
diff --git a/block/curl.c b/block/curl.c
index a909eca337..bf3680ba57 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -89,19 +89,17 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
     DPRINTF("CURL (AIO): Sock action %d on fd %d\n", action, fd);
     switch (action) {
         case CURL_POLL_IN:
-            qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, curl_aio_flush,
-                                    NULL, s);
+            qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, curl_aio_flush, s);
             break;
         case CURL_POLL_OUT:
-            qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, curl_aio_flush,
-                                    NULL, s);
+            qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, curl_aio_flush, s);
             break;
         case CURL_POLL_INOUT:
             qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do,
-                                    curl_aio_flush, NULL, s);
+                                    curl_aio_flush, s);
             break;
         case CURL_POLL_REMOVE:
-            qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL, NULL);
+            qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL);
             break;
     }
 
diff --git a/block/iscsi.c b/block/iscsi.c
index bd3ca11b2e..5222726d0f 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -108,7 +108,7 @@ iscsi_set_events(IscsiLun *iscsilun)
     qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), iscsi_process_read,
                             (iscsi_which_events(iscsi) & POLLOUT)
                             ? iscsi_process_write : NULL,
-                            iscsi_process_flush, NULL, iscsilun);
+                            iscsi_process_flush, iscsilun);
 }
 
 static void
@@ -682,7 +682,7 @@ static void iscsi_close(BlockDriverState *bs)
     IscsiLun *iscsilun = bs->opaque;
     struct iscsi_context *iscsi = iscsilun->iscsi;
 
-    qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL, NULL);
     iscsi_destroy_context(iscsi);
     memset(iscsilun, 0, sizeof(IscsiLun));
 }
diff --git a/block/nbd.c b/block/nbd.c
index 161b299855..524c9cf412 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -191,7 +191,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
     qemu_co_mutex_lock(&s->send_mutex);
     s->send_coroutine = qemu_coroutine_self();
     qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write,
-                            nbd_have_request, NULL, s);
+                            nbd_have_request, s);
     rc = nbd_send_request(s->sock, request);
     if (rc != -1 && iov) {
         ret = qemu_co_sendv(s->sock, iov, request->len, offset);
@@ -201,7 +201,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
         }
     }
     qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL,
-                            nbd_have_request, NULL, s);
+                            nbd_have_request, s);
     s->send_coroutine = NULL;
     qemu_co_mutex_unlock(&s->send_mutex);
     return rc;
@@ -274,7 +274,7 @@ static int nbd_establish_connection(BlockDriverState *bs)
      * kick the reply mechanism.  */
     socket_set_nonblock(sock);
     qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL,
-                            nbd_have_request, NULL, s);
+                            nbd_have_request, s);
 
     s->sock = sock;
     s->size = size;
@@ -294,7 +294,7 @@ static void nbd_teardown_connection(BlockDriverState *bs)
     request.len = 0;
     nbd_send_request(s->sock, &request);
 
-    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL);
     closesocket(s->sock);
 }
diff --git a/block/rbd.c b/block/rbd.c
index 46a8579018..6cd84488e4 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -504,7 +504,7 @@ static int qemu_rbd_open(BlockDriverState *bs, const char *filename, int flags)
     fcntl(s->fds[0], F_SETFL, O_NONBLOCK);
     fcntl(s->fds[1], F_SETFL, O_NONBLOCK);
     qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader,
-                            NULL, qemu_rbd_aio_flush_cb, NULL, s);
+                            NULL, qemu_rbd_aio_flush_cb, s);
 
     return 0;
 
@@ -525,8 +525,7 @@ static void qemu_rbd_close(BlockDriverState *bs)
     close(s->fds[0]);
     close(s->fds[1]);
-    qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL , NULL, NULL, NULL,
-                            NULL);
+    qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL, NULL);
 
     rbd_close(s->image);
     rados_ioctx_destroy(s->io_ctx);
diff --git a/block/sheepdog.c b/block/sheepdog.c
index 3eaf625e98..0ed6b193c9 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -799,8 +799,7 @@ static int get_sheep_fd(BDRVSheepdogState *s)
         return -1;
     }
 
-    qemu_aio_set_fd_handler(fd, co_read_response, NULL, aio_flush_request,
-                            NULL, s);
+    qemu_aio_set_fd_handler(fd, co_read_response, NULL, aio_flush_request, s);
 
     return fd;
 }
@@ -973,7 +972,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
     qemu_co_mutex_lock(&s->lock);
     s->co_send = qemu_coroutine_self();
     qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request,
-                            aio_flush_request, NULL, s);
+                            aio_flush_request, s);
     socket_set_cork(s->fd, 1);
 
     /* send a header */
@@ -995,7 +994,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
     }
 
     socket_set_cork(s->fd, 0);
     qemu_aio_set_fd_handler(s->fd, co_read_response, NULL,
-                            aio_flush_request, NULL, s);
+                            aio_flush_request, s);
     qemu_co_mutex_unlock(&s->lock);
 
     return 0;
@@ -1135,7 +1134,7 @@ static int sd_open(BlockDriverState *bs, const char *filename, int flags)
     g_free(buf);
     return 0;
 out:
-    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
     if (s->fd >= 0) {
         closesocket(s->fd);
     }
@@ -1349,7 +1348,7 @@ static void sd_close(BlockDriverState *bs)
         error_report("%s, %s", sd_strerror(rsp->result), s->name);
     }
 
-    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
     closesocket(s->fd);
 
     if (s->cache_enabled) {
         closesocket(s->flush_fd);
diff --git a/linux-aio.c b/linux-aio.c
index 15261ece3d..fa0fbf34aa 100644
--- a/linux-aio.c
+++ b/linux-aio.c
@@ -214,7 +214,7 @@ void *laio_init(void)
         goto out_close_efd;
 
     qemu_aio_set_fd_handler(s->efd, qemu_laio_completion_cb, NULL,
-                            qemu_laio_flush_cb, NULL, s);
+                            qemu_laio_flush_cb, s);
 
     return s;
 
diff --git a/posix-aio-compat.c b/posix-aio-compat.c
index 1066c601d2..68361f555a 100644
--- a/posix-aio-compat.c
+++ b/posix-aio-compat.c
@@ -663,8 +663,7 @@ int paio_init(void)
     fcntl(s->rfd, F_SETFL, O_NONBLOCK);
     fcntl(s->wfd, F_SETFL, O_NONBLOCK);
 
-    qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush,
-                            NULL, s);
+    qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush, s);
 
     ret = pthread_attr_init(&attr);
     if (ret)
diff --git a/qemu-aio.h b/qemu-aio.h
index 230c2f79a0..0fc84093c0 100644
--- a/qemu-aio.h
+++ b/qemu-aio.h
@@ -41,11 +41,6 @@ void qemu_aio_release(void *p);
 /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
 typedef int (AioFlushHandler)(void *opaque);
 
-/* Runs all currently allowed AIO callbacks of completed requests in the
- * respective AIO backend. Returns 0 if no requests was handled, non-zero
- * if at least one queued request was handled. */
-typedef int (AioProcessQueue)(void *opaque);
-
 /* Flush any pending AIO operation. This function will block until all
  * outstanding AIO operations have been completed or cancelled. */
 void qemu_aio_flush(void);
@@ -56,13 +51,6 @@ void qemu_aio_flush(void);
  * result of executing I/O completion or bh callbacks. */
 void qemu_aio_wait(void);
 
-/*
- * Runs all currently allowed AIO callbacks of completed requests. Returns 0
- * if no requests were handled, non-zero if at least one request was
- * processed.
- */
-int qemu_aio_process_queue(void);
-
 /* Register a file descriptor and associated callbacks. Behaves very similarly
  * to qemu_set_fd_handler2. Unlike qemu_set_fd_handler2, these callbacks will
  * be invoked when using either qemu_aio_wait() or qemu_aio_flush().
@@ -74,7 +62,6 @@ int qemu_aio_set_fd_handler(int fd,
                             IOHandler *io_read,
                             IOHandler *io_write,
                             AioFlushHandler *io_flush,
-                            AioProcessQueue *io_process_queue,
                             void *opaque);
 
 #endif
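
The wait side is unchanged by this patch. Per the qemu-aio.h comment above, qemu_aio_wait() blocks until at least one completion or bottom-half callback has run, so a caller that needs to drain a single request can loop on its own flag. A hypothetical sketch (the done flag and function name are assumptions of this example, not part of the patch):

/* Hypothetical drain loop; 'done' is set by the caller's
 * completion callback. */
static void wait_for_completion(volatile int *done)
{
    while (!*done) {
        qemu_aio_wait();   /* runs bh/completion callbacks, then polls */
    }
}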