From: Alex Bligh
Subject: [Qemu-devel] [RFC] [PATCHv3 12/12] aio / timers: Add test harness for AioContext timers
Date: Thu, 25 Jul 2013 23:16:48 +0100

Add a test harness for AioContext timers. The GSource-based equivalent
is unsatisfactory, as it suffers from false wakeups.
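
For illustration, this is the pattern the GSource-based test below has to
use to cope with those false wakeups (a lightly annotated excerpt from
test_source_timer_schedule): g_main_context_iteration() can return TRUE
before the timer callback has actually run, so the test keeps iterating
until the expiry time has definitely passed:

    /* An iteration may report progress without dispatching the timer,
     * so spin until the clock has passed the expiry time.
     */
    do {
        g_assert(g_main_context_iteration(NULL, true));
    } while (qemu_get_clock_ns(data.clock) <= expiry);

The aio_poll() variant needs no such loop: the test can simply assert
that a blocking aio_poll() makes progress exactly when the timer fires.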

Signed-off-by: Alex Bligh <address@hidden>
---
 tests/test-aio.c |  122 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 122 insertions(+)

diff --git a/tests/test-aio.c b/tests/test-aio.c
index c173870..71841c0 100644
--- a/tests/test-aio.c
+++ b/tests/test-aio.c
@@ -12,6 +12,7 @@
 
 #include <glib.h>
 #include "block/aio.h"
+#include "qemu/timer.h"
 
 AioContext *ctx;
 
@@ -31,6 +32,15 @@ typedef struct {
     int max;
 } BHTestData;
 
+typedef struct {
+    QEMUTimer *timer;
+    QEMUClock *clock;
+    int n;
+    int max;
+    int64_t ns;
+    AioContext *ctx;
+} TimerTestData;
+
 static void bh_test_cb(void *opaque)
 {
     BHTestData *data = opaque;
@@ -39,6 +49,24 @@ static void bh_test_cb(void *opaque)
     }
 }
 
+static void timer_test_cb(void *opaque)
+{
+    TimerTestData *data = opaque;
+    if (++data->n < data->max) {
+        qemu_mod_timer(data->timer,
+                       qemu_get_clock_ns(data->clock) + data->ns);
+    }
+}
+
+static void dummy_io_handler_read(void *opaque)
+{
+}
+
+static int dummy_io_handler_flush(void *opaque)
+{
+    return 1;
+}
+
 static void bh_delete_cb(void *opaque)
 {
     BHTestData *data = opaque;
@@ -340,6 +368,51 @@ static void test_wait_event_notifier_noflush(void)
     event_notifier_cleanup(&data.e);
 }
 
+static void test_timer_schedule(void)
+{
+    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750,
+                           .max = 2, .clock = ctx->clock };
+    int pipefd[2];
+
+    /* aio_poll will not block waiting for timers to expire unless it also
+     * has an fd to wait on; fixing that would break other tests, so we
+     * create a dummy fd for it to wait on instead. */
+    g_assert(!pipe2(pipefd, O_NONBLOCK));
+    aio_set_fd_handler(ctx, pipefd[0],
+                       dummy_io_handler_read, NULL, dummy_io_handler_flush,
+                       NULL);
+    aio_poll(ctx, false);
+
+    data.timer = qemu_new_timer_ns(data.clock, timer_test_cb, &data);
+    qemu_mod_timer(data.timer, qemu_get_clock_ns(data.clock) + data.ns);
+
+    g_assert_cmpint(data.n, ==, 0);
+
+    g_assert(!aio_poll(ctx, false));
+    g_assert_cmpint(data.n, ==, 0);
+
+    sleep(1);
+    g_assert_cmpint(data.n, ==, 0);
+
+    g_assert(aio_poll(ctx, false));
+    g_assert_cmpint(data.n, ==, 1);
+
+    g_assert(!aio_poll(ctx, false));
+    g_assert_cmpint(data.n, ==, 1);
+
+    g_assert(aio_poll(ctx, true));
+    g_assert_cmpint(data.n, ==, 2);
+
+    g_assert(!aio_poll(ctx, false));
+    g_assert_cmpint(data.n, ==, 2);
+
+    aio_set_fd_handler(ctx, pipefd[0], NULL, NULL, NULL, NULL);
+    close(pipefd[0]);
+    close(pipefd[1]);
+
+    qemu_del_timer(data.timer);
+}
+
 /* Now the same tests, using the context as a GSource.  They are
  * very similar to the ones above, with g_main_context_iteration
  * replacing aio_poll.  However:
@@ -622,12 +695,59 @@ static void test_source_wait_event_notifier_noflush(void)
     event_notifier_cleanup(&data.e);
 }
 
+static void test_source_timer_schedule(void)
+{
+    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750,
+                           .max = 2, .clock = ctx->clock };
+    int pipefd[2];
+    int64_t expiry;
+
+    /* aio_poll will not block waiting for timers to expire unless it also
+     * has an fd to wait on; fixing that would break other tests, so we
+     * create a dummy fd for it to wait on instead. */
+    g_assert(!pipe2(pipefd, O_NONBLOCK));
+    aio_set_fd_handler(ctx, pipefd[0],
+                       dummy_io_handler_read, NULL, dummy_io_handler_flush,
+                       NULL);
+    while (g_main_context_iteration(NULL, false));
+
+    data.timer = qemu_new_timer_ns(data.clock, timer_test_cb, &data);
+    expiry = qemu_get_clock_ns(data.clock) + data.ns;
+    qemu_mod_timer(data.timer, expiry);
+
+    g_assert_cmpint(data.n, ==, 0);
+
+    sleep(1);
+    g_assert_cmpint(data.n, ==, 0);
+
+    g_assert(g_main_context_iteration(NULL, false));
+    g_assert_cmpint(data.n, ==, 1);
+
+    /* As the comment above warns, the main loop can wake up spuriously */
+    do {
+        g_assert(g_main_context_iteration(NULL, true));
+    } while (qemu_get_clock_ns(data.clock) <= expiry);
+    sleep(1);
+    g_main_context_iteration(NULL, false);
+
+    g_assert_cmpint(data.n, ==, 2);
+
+    aio_set_fd_handler(ctx, pipefd[0], NULL, NULL, NULL, NULL);
+    close(pipefd[0]);
+    close(pipefd[1]);
+
+    qemu_del_timer(data.timer);
+}
+
+
 /* End of tests.  */
 
 int main(int argc, char **argv)
 {
     GSource *src;
 
+    init_clocks();
+
     ctx = aio_context_new();
     src = aio_get_g_source(ctx);
     g_source_attach(src, NULL);
@@ -648,6 +768,7 @@ int main(int argc, char **argv)
     g_test_add_func("/aio/event/wait",              test_wait_event_notifier);
     g_test_add_func("/aio/event/wait/no-flush-cb",  
test_wait_event_notifier_noflush);
     g_test_add_func("/aio/event/flush",             test_flush_event_notifier);
+    g_test_add_func("/aio/timer/schedule",          test_timer_schedule);
 
     g_test_add_func("/aio-gsource/notify",                  
test_source_notify);
     g_test_add_func("/aio-gsource/flush",                   test_source_flush);
@@ -662,5 +783,6 @@ int main(int argc, char **argv)
     g_test_add_func("/aio-gsource/event/wait",              
test_source_wait_event_notifier);
     g_test_add_func("/aio-gsource/event/wait/no-flush-cb",  
test_source_wait_event_notifier_noflush);
     g_test_add_func("/aio-gsource/event/flush",             
test_source_flush_event_notifier);
+    g_test_add_func("/aio-gsource/timer/schedule",          
test_source_timer_schedule);
     return g_test_run();
 }
-- 
1.7.9.5
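
Once the test binary is built, the new cases can be run on their own with
GLib's standard -p test-path option (a sketch; the per-file make target is
assumed to exist in the build tree):

    $ make tests/test-aio
    $ ./tests/test-aio -p /aio/timer/schedule -p /aio-gsource/timer/schedule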



