| author | Fabien Chouteau <chouteau@adacore.com> | 2013-01-08 16:30:56 +0100 |
|---|---|---|
| committer | Anthony Liguori <aliguori@us.ibm.com> | 2013-01-09 11:03:05 -0600 |
| commit | 5e3bc735d93dd23f074b5116fd11e1ad8cd4962f (patch) | |
| tree | 384a280b0892ee81c14d00a106627337913748b2 /main-loop.c | |
| parent | 7cd5da7eef152a533c5774effd2e7bbfa5976c86 (diff) | |
Check return values from g_poll and select
The current implementation of os_host_main_loop_wait() on Windows returns 1
only when a g_poll() event occurs, because the return value of select() is
overridden. This is wrong, as we may skip a socket event, as shown in this
example (a minimal C sketch of the interleaving follows the list):
1. select() returns 0
2. g_poll() returns 1 (socket event occurs)
3. os_host_main_loop_wait() returns 1
4. qemu_iohandler_poll() sees no socket event because select() returned
before the event occurred
5. select() returns 1
6. g_poll() returns 0 (g_poll overrides select's return value)
7. os_host_main_loop_wait() returns 0
8. qemu_iohandler_poll() doesn't check for socket events because the
return value of os_host_main_loop_wait() is zero.
9. goto 5
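To make the clobbering concrete, here is a minimal, compilable C sketch of the pre-patch control flow. The helpers select_sockets() and poll_glib_sources() are hypothetical stand-ins for the real select() and g_poll() calls, hard-wired to reproduce steps 5-8 above; this is an illustration, not the actual QEMU code.

```c
#include <stdio.h>

/* Hypothetical stand-ins for select() and g_poll(); hard-wired so that the
 * socket layer reports an event while glib does not (steps 5-8 above). */
static int select_sockets(void)    { return 1; }  /* socket event pending */
static int poll_glib_sources(void) { return 0; }  /* no glib event */

/* Pre-patch shape: a single 'ret' holds both results, so the second call
 * overwrites whatever the first one reported. */
static int old_wait(void)
{
    int ret;

    ret = select_sockets();     /* returns 1: a socket is ready */
    ret = poll_glib_sources();  /* overwrites it with 0 */
    return ret;                 /* the socket event is lost */
}

int main(void)
{
    printf("old_wait() = %d (socket event dropped)\n", old_wait());
    return 0;
}
```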
This patch uses a separate variable for each of these return values, so we
no longer miss a select() event.
Also move the call to select() after g_poll(); this improves latency, since
we no longer have to go through two os_host_main_loop_wait() calls to
detect a socket event.
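Under the same assumptions (select_sockets() and poll_glib_sources() as hypothetical stand-ins), a sketch of the post-patch shape: one variable per result, select() moved last, and the two results combined on return.

```c
#include <stdio.h>

static int select_sockets(void)    { return 1; }  /* socket event pending */
static int poll_glib_sources(void) { return 0; }  /* no glib event */

/* Post-patch shape: each result gets its own variable, and select() runs
 * after g_poll(), just before the function returns, so its result is
 * still fresh when the caller checks for socket events. */
static int new_wait(void)
{
    int g_poll_ret, select_ret;

    g_poll_ret = poll_glib_sources();  /* glib/WaitObjects events first */
    select_ret = select_sockets();     /* sockets last */

    /* Non-zero if either source reported an event. */
    return select_ret || g_poll_ret;
}

int main(void)
{
    printf("new_wait() = %d (socket event preserved)\n", new_wait());
    return 0;
}
```

Returning select_ret || g_poll_ret means the caller sees a non-zero result whenever either polling source had activity, so qemu_iohandler_poll() is no longer skipped while a socket event is pending.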
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Fabien Chouteau <chouteau@adacore.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Diffstat (limited to 'main-loop.c')
-rw-r--r-- | main-loop.c | 27
1 file changed, 13 insertions(+), 14 deletions(-)
```diff
diff --git a/main-loop.c b/main-loop.c
index 54f38ae1ae..6f52ac39bc 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -330,7 +330,7 @@ void qemu_fd_register(int fd)
 static int os_host_main_loop_wait(uint32_t timeout)
 {
     GMainContext *context = g_main_context_default();
-    int ret, i;
+    int select_ret, g_poll_ret, ret, i;
     PollingEntry *pe;
     WaitObjects *w = &wait_objects;
     gint poll_timeout;
@@ -345,13 +345,6 @@ static int os_host_main_loop_wait(uint32_t timeout)
         return ret;
     }
 
-    if (nfds >= 0) {
-        ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
-        if (ret != 0) {
-            timeout = 0;
-        }
-    }
-
     g_main_context_prepare(context, &max_priority);
     n_poll_fds = g_main_context_query(context, max_priority, &poll_timeout,
                                       poll_fds, ARRAY_SIZE(poll_fds));
@@ -367,9 +360,9 @@ static int os_host_main_loop_wait(uint32_t timeout)
     }
 
     qemu_mutex_unlock_iothread();
-    ret = g_poll(poll_fds, n_poll_fds + w->num, poll_timeout);
+    g_poll_ret = g_poll(poll_fds, n_poll_fds + w->num, poll_timeout);
     qemu_mutex_lock_iothread();
-    if (ret > 0) {
+    if (g_poll_ret > 0) {
         for (i = 0; i < w->num; i++) {
             w->revents[i] = poll_fds[n_poll_fds + i].revents;
         }
@@ -384,12 +377,18 @@ static int os_host_main_loop_wait(uint32_t timeout)
         g_main_context_dispatch(context);
     }
 
-    /* If an edge-triggered socket event occurred, select will return a
-     * positive result on the next iteration. We do not need to do anything
-     * here.
+    /* Call select after g_poll to avoid a useless iteration and therefore
+     * improve socket latency.
      */
 
-    return ret;
+    if (nfds >= 0) {
+        select_ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
+        if (select_ret != 0) {
+            timeout = 0;
+        }
+    }
+
+    return select_ret || g_poll_ret;
 }
 #endif
```