author     Anthony Liguori <anthony@codemonkey.ws>   2013-08-26 09:19:50 -0500
committer  Anthony Liguori <anthony@codemonkey.ws>   2013-08-26 09:19:50 -0500
commit     f7ad538e1ea130c8b6f3abb06ad6c856242c799e (patch)
tree       beed9203bd5083d854c4628eb0c9c29518f77d67 /tests
parent     e3f024aec29a2e3eff46138687e2ecba7631c645 (diff)
parent     b10577df13fa4a1b38ea6c1ea7b66c6dfd90a07a (diff)

Merge remote-tracking branch 'stefanha/block' into staging

# By Alex Bligh (32) and others
# Via Stefan Hajnoczi

* stefanha/block: (42 commits)
  win32-aio: drop win32_aio_flush_cb()
  aio-win32: replace incorrect AioHandler->opaque usage with ->e
  aio / timers: remove dummy_io_handler_flush from tests/test-aio.c
  aio / timers: Remove legacy interface
  aio / timers: Switch entire codebase to the new timer API
  aio / timers: Add scripts/switch-timer-api
  aio / timers: Add test harness for AioContext timers
  aio / timers: convert block_job_sleep_ns and co_sleep_ns to new API
  aio / timers: Convert rtc_clock to be a QEMUClockType
  aio / timers: Remove main_loop_timerlist
  aio / timers: Rearrange timer.h & make legacy functions call non-legacy
  aio / timers: Add qemu_clock_get_ms and qemu_clock_get_us
  aio / timers: Remove legacy qemu_clock_deadline & qemu_timerlist_deadline
  aio / timers: Remove alarm timers
  aio / timers: Add documentation and new format calls
  aio / timers: Use all timerlists in icount warp calculations
  aio / timers: Introduce new API timer_new and friends
  aio / timers: On timer modification, qemu_notify or aio_notify
  aio / timers: Convert mainloop to use timeout
  aio / timers: Convert aio_poll to use AioContext timers' deadline
  ...

Message-id: 1377202298-22896-1-git-send-email-stefanha@redhat.com
Signed-off-by: Anthony Liguori <anthony@codemonkey.ws>
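
The bulk of the series is a mechanical conversion from the old vm_clock/rt_clock
globals to the clock-type-based calls. A minimal sketch of what that conversion
looks like, assuming the post-series API as merged here (my_cb and
convert_example are illustrative names, not code from this tree):

#include "qemu/timer.h"

static void my_cb(void *opaque)
{
    /* illustrative callback body */
}

static void convert_example(void)
{
    /* Legacy style, removed by "aio / timers: Remove legacy interface":
     *
     *   QEMUTimer *t = qemu_new_timer_ns(vm_clock, my_cb, NULL);
     *   qemu_mod_timer(t, qemu_get_clock_ns(vm_clock) + SCALE_MS);
     *
     * New style: the clock is named by a QEMUClockType enumerator.
     */
    QEMUTimer *t = timer_new_ns(QEMU_CLOCK_VIRTUAL, my_cb, NULL);

    timer_mod(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SCALE_MS);
    timer_del(t);
    timer_free(t);
}

scripts/switch-timer-api, added by the series, automates this rewrite across
the codebase.
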
Diffstat (limited to 'tests')
-rw-r--r--  tests/libqtest.h           24
-rw-r--r--  tests/test-aio.c          132
-rw-r--r--  tests/test-thread-pool.c    3

3 files changed, 147 insertions(+), 12 deletions(-)

diff --git a/tests/libqtest.h b/tests/libqtest.h
index 0f6aade092..a6e99bd023 100644
--- a/tests/libqtest.h
+++ b/tests/libqtest.h
@@ -258,9 +258,9 @@ void qtest_memwrite(QTestState *s, uint64_t addr, const void *data, size_t size)
* qtest_clock_step_next:
* @s: #QTestState instance to operate on.
*
- * Advance the vm_clock to the next deadline.
+ * Advance the QEMU_CLOCK_VIRTUAL to the next deadline.
*
- * Returns: The current value of the vm_clock in nanoseconds.
+ * Returns: The current value of the QEMU_CLOCK_VIRTUAL in nanoseconds.
*/
int64_t qtest_clock_step_next(QTestState *s);

@@ -269,9 +269,9 @@ int64_t qtest_clock_step_next(QTestState *s);
* @s: QTestState instance to operate on.
* @step: Number of nanoseconds to advance the clock by.
*
- * Advance the vm_clock by @step nanoseconds.
+ * Advance the QEMU_CLOCK_VIRTUAL by @step nanoseconds.
*
- * Returns: The current value of the vm_clock in nanoseconds.
+ * Returns: The current value of the QEMU_CLOCK_VIRTUAL in nanoseconds.
*/
int64_t qtest_clock_step(QTestState *s, int64_t step);

@@ -280,9 +280,9 @@ int64_t qtest_clock_step(QTestState *s, int64_t step);
* @s: QTestState instance to operate on.
* @val: Nanoseconds value to advance the clock to.
*
- * Advance the vm_clock to @val nanoseconds since the VM was launched.
+ * Advance the QEMU_CLOCK_VIRTUAL to @val nanoseconds since the VM was launched.
*
- * Returns: The current value of the vm_clock in nanoseconds.
+ * Returns: The current value of the QEMU_CLOCK_VIRTUAL in nanoseconds.
*/
int64_t qtest_clock_set(QTestState *s, int64_t val);

@@ -584,9 +584,9 @@ static inline void memwrite(uint64_t addr, const void *data, size_t size)
/**
* clock_step_next:
*
- * Advance the vm_clock to the next deadline.
+ * Advance the QEMU_CLOCK_VIRTUAL to the next deadline.
*
- * Returns: The current value of the vm_clock in nanoseconds.
+ * Returns: The current value of the QEMU_CLOCK_VIRTUAL in nanoseconds.
*/
static inline int64_t clock_step_next(void)
{

@@ -597,9 +597,9 @@ static inline int64_t clock_step_next(void)
* clock_step:
* @step: Number of nanoseconds to advance the clock by.
*
- * Advance the vm_clock by @step nanoseconds.
+ * Advance the QEMU_CLOCK_VIRTUAL by @step nanoseconds.
*
- * Returns: The current value of the vm_clock in nanoseconds.
+ * Returns: The current value of the QEMU_CLOCK_VIRTUAL in nanoseconds.
*/
static inline int64_t clock_step(int64_t step)
{

@@ -610,9 +610,9 @@ static inline int64_t clock_step(int64_t step)
* clock_set:
* @val: Nanoseconds value to advance the clock to.
*
- * Advance the vm_clock to @val nanoseconds since the VM was launched.
+ * Advance the QEMU_CLOCK_VIRTUAL to @val nanoseconds since the VM was launched.
*
- * Returns: The current value of the vm_clock in nanoseconds.
+ * Returns: The current value of the QEMU_CLOCK_VIRTUAL in nanoseconds.
*/
static inline int64_t clock_set(int64_t val)
{
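
Only the doc comments change in libqtest.h; the helpers themselves still
advance the virtual clock as before. For context, a test would use them
roughly as below (the test body is hypothetical, not part of this patch):

#include "libqtest.h"

/* Hypothetical fragment: advance QEMU_CLOCK_VIRTUAL deterministically
 * instead of sleeping in real time. */
static void test_device_timer(void)
{
    int64_t now;

    now = clock_step(10 * 1000 * 1000);  /* advance the clock by 10 ms */
    /* ... read device registers and assert the guest timer fired ... */

    now = clock_step_next();             /* or jump to the next deadline */
    (void)now;
}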

diff --git a/tests/test-aio.c b/tests/test-aio.c
index 1ab5637d95..07a1f61f87 100644
--- a/tests/test-aio.c
+++ b/tests/test-aio.c
@@ -12,6 +12,7 @@
#include <glib.h>
#include "block/aio.h"
+#include "qemu/timer.h"
AioContext *ctx;

@@ -46,6 +47,15 @@ typedef struct {
int max;
} BHTestData;
+typedef struct {
+ QEMUTimer timer;
+ QEMUClockType clock_type;
+ int n;
+ int max;
+ int64_t ns;
+ AioContext *ctx;
+} TimerTestData;
+
static void bh_test_cb(void *opaque)
{
BHTestData *data = opaque;

@@ -54,6 +64,19 @@ static void bh_test_cb(void *opaque)
}
}
+static void timer_test_cb(void *opaque)
+{
+ TimerTestData *data = opaque;
+ if (++data->n < data->max) {
+ timer_mod(&data->timer,
+ qemu_clock_get_ns(data->clock_type) + data->ns);
+ }
+}
+
+static void dummy_io_handler_read(void *opaque)
+{
+}
+
static void bh_delete_cb(void *opaque)
{
BHTestData *data = opaque;

@@ -342,6 +365,64 @@ static void test_wait_event_notifier_noflush(void)
event_notifier_cleanup(&data.e);
}
+static void test_timer_schedule(void)
+{
+ TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
+ .max = 2,
+ .clock_type = QEMU_CLOCK_VIRTUAL };
+ int pipefd[2];
+
+ /* aio_poll will not block to wait for timers to complete unless it has
+ * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
+ */
+ g_assert(!pipe2(pipefd, O_NONBLOCK));
+ aio_set_fd_handler(ctx, pipefd[0],
+ dummy_io_handler_read, NULL, NULL);
+ aio_poll(ctx, false);
+
+ aio_timer_init(ctx, &data.timer, data.clock_type,
+ SCALE_NS, timer_test_cb, &data);
+ timer_mod(&data.timer,
+ qemu_clock_get_ns(data.clock_type) +
+ data.ns);
+
+ g_assert_cmpint(data.n, ==, 0);
+
+ /* timer_mod may well cause an event notifier to have gone off,
+ * so clear that
+ */
+ do {} while (aio_poll(ctx, false));
+
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 0);
+
+ sleep(1);
+ g_assert_cmpint(data.n, ==, 0);
+
+ g_assert(aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 1);
+
+ /* timer_mod called by our callback */
+ do {} while (aio_poll(ctx, false));
+
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 1);
+
+ g_assert(aio_poll(ctx, true));
+ g_assert_cmpint(data.n, ==, 2);
+
+ /* As max is now 2, an event notifier should not have gone off */
+
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 2);
+
+ aio_set_fd_handler(ctx, pipefd[0], NULL, NULL, NULL);
+ close(pipefd[0]);
+ close(pipefd[1]);
+
+ timer_del(&data.timer);
+}
+
/* Now the same tests, using the context as a GSource. They are
* very similar to the ones above, with g_main_context_iteration
* replacing aio_poll. However:

@@ -624,12 +705,61 @@ static void test_source_wait_event_notifier_noflush(void)
event_notifier_cleanup(&data.e);
}
+static void test_source_timer_schedule(void)
+{
+ TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
+ .max = 2,
+ .clock_type = QEMU_CLOCK_VIRTUAL };
+ int pipefd[2];
+ int64_t expiry;
+
+ /* aio_poll will not block to wait for timers to complete unless it has
+ * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
+ */
+ g_assert(!pipe2(pipefd, O_NONBLOCK));
+ aio_set_fd_handler(ctx, pipefd[0],
+ dummy_io_handler_read, NULL, NULL);
+ do {} while (g_main_context_iteration(NULL, false));
+
+ aio_timer_init(ctx, &data.timer, data.clock_type,
+ SCALE_NS, timer_test_cb, &data);
+ expiry = qemu_clock_get_ns(data.clock_type) +
+ data.ns;
+ timer_mod(&data.timer, expiry);
+
+ g_assert_cmpint(data.n, ==, 0);
+
+ sleep(1);
+ g_assert_cmpint(data.n, ==, 0);
+
+ g_assert(g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 1);
+
+ /* The comment above was not kidding when it said this wakes up itself */
+ do {
+ g_assert(g_main_context_iteration(NULL, true));
+ } while (qemu_clock_get_ns(data.clock_type) <= expiry);
+ sleep(1);
+ g_main_context_iteration(NULL, false);
+
+ g_assert_cmpint(data.n, ==, 2);
+
+ aio_set_fd_handler(ctx, pipefd[0], NULL, NULL, NULL);
+ close(pipefd[0]);
+ close(pipefd[1]);
+
+ timer_del(&data.timer);
+}
+
+
/* End of tests. */
int main(int argc, char **argv)
{
GSource *src;
+ init_clocks();
+
ctx = aio_context_new();
src = aio_get_g_source(ctx);
g_source_attach(src, NULL);

@@ -650,6 +780,7 @@ int main(int argc, char **argv)
g_test_add_func("/aio/event/wait", test_wait_event_notifier);
g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush);
g_test_add_func("/aio/event/flush", test_flush_event_notifier);
+ g_test_add_func("/aio/timer/schedule", test_timer_schedule);
g_test_add_func("/aio-gsource/notify", test_source_notify);
g_test_add_func("/aio-gsource/flush", test_source_flush);

@@ -664,5 +795,6 @@ int main(int argc, char **argv)
g_test_add_func("/aio-gsource/event/wait", test_source_wait_event_notifier);
g_test_add_func("/aio-gsource/event/wait/no-flush-cb", test_source_wait_event_notifier_noflush);
g_test_add_func("/aio-gsource/event/flush", test_source_flush_event_notifier);
+ g_test_add_func("/aio-gsource/timer/schedule", test_source_timer_schedule);
return g_test_run();
}
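
Both new tests reduce to the same pattern: put a timer on the AioContext's own
timer list, then let aio_poll() or g_main_context_iteration() fire it. A
condensed sketch of the aio_poll() variant (fired and run_one_timer are
illustrative names; it spins non-blocking because, as the comment in the test
notes, a blocking aio_poll() only waits for timer deadlines when it also has
an fd to poll):

#include "block/aio.h"
#include "qemu/timer.h"

static void fired(void *opaque)
{
    *(bool *)opaque = true;
}

static void run_one_timer(AioContext *aio_ctx)
{
    QEMUTimer t;
    bool done = false;

    /* aio_timer_init() attaches the timer to aio_ctx's timer list, making
     * this AioContext, not the main loop, responsible for firing it. */
    aio_timer_init(aio_ctx, &t, QEMU_CLOCK_REALTIME, SCALE_NS, fired, &done);
    timer_mod(&t, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + SCALE_MS);

    while (!done) {
        aio_poll(aio_ctx, false);  /* expired timers run even when not blocking */
    }
    timer_del(&t);
}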

diff --git a/tests/test-thread-pool.c b/tests/test-thread-pool.c
index 8188d1a69d..c1f8e13a9f 100644
--- a/tests/test-thread-pool.c
+++ b/tests/test-thread-pool.c
@@ -3,6 +3,7 @@
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/block.h"
+#include "qemu/timer.h"
static AioContext *ctx;
static ThreadPool *pool;

@@ -205,6 +206,8 @@ int main(int argc, char **argv)
{
int ret;
+ init_clocks();
+
ctx = aio_context_new();
pool = aio_get_thread_pool(ctx);
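
The test-thread-pool.c hunk is the mechanical part of the same change: now
that aio_poll() consults the timer subsystem, standalone test binaries have to
initialise the clocks themselves before creating an AioContext. The minimal
preamble, as a sketch:

#include "qemu/timer.h"
#include "block/aio.h"

int main(int argc, char **argv)
{
    AioContext *ctx;

    init_clocks();              /* must precede any qemu_clock_*() use */
    ctx = aio_context_new();

    /* ... register tests that use ctx, then run them ... */
    return 0;
}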