From: Alex Bligh
Subject: [Qemu-devel] [PATCHv2] [RFC] aio/async: Add timed bottom-halves
Date: Sat, 6 Jul 2013 19:04:16 +0100

Add timed bottom halves. A timed bottom half is a bottom half that
does not execute until a given time is reached (qemu_bh_schedule_at)
or a given interval has elapsed (qemu_bh_schedule_in). Any QEMU
clock can be used, and times are specified in nanoseconds.
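
As a hedged illustration of the new calls (my_ctx, my_cb and opaque
are hypothetical placeholders; rt_clock, SCALE_MS and the scheduling
functions are those used by this patch), a caller might do:

    /* Run my_cb roughly 500ms from now on rt_clock, e.g. from code
     * such as qemu-img where no main loop services timers. */
    QEMUBH *bh = aio_bh_new(my_ctx, my_cb, opaque);
    qemu_bh_schedule_in(bh, rt_clock, 500 * SCALE_MS);

    /* Or against an absolute deadline on the same clock: */
    qemu_bh_schedule_at(bh, rt_clock,
                        qemu_get_clock_ns(rt_clock) + 500 * SCALE_MS);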

Timed bottom halves can be used where timers cannot: for instance,
in block drivers where there is no main loop to service timers
(qemu-nbd, qemu-img), or where (per address@hidden) the aio code
loops internally and timers therefore never fire.

Changes since v1:
* aio_ctx_prepare should cope with wait < 0
* aio_ctx_prepare should round up the wait time (see sketch below)
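
For clarity, a minimal sketch of the wait computation those two fixes
imply (illustrative only, not the patch text; ns and wait_ms are
made-up locals):

    int64_t ns = qemu_bh_ready_in(bh);   /* <= 0 means overdue */
    int64_t wait_ms;
    if (ns <= 0) {
        wait_ms = 0;                     /* ready: run immediately */
    } else {
        /* round up so a millisecond-granularity poll cannot
         * fire before the nanosecond deadline */
        wait_ms = (ns + SCALE_MS - 1) / SCALE_MS;
    }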

Signed-off-by: Alex Bligh <address@hidden>
---
 async.c             |   53 +++++++++++++++++++++++++++++++++++++++++++++------
 include/block/aio.h |   33 ++++++++++++++++++++++++++++++++
 tests/test-aio.c    |   47 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 127 insertions(+), 6 deletions(-)

diff --git a/async.c b/async.c
index 90fe906..d93b1ab 100644
--- a/async.c
+++ b/async.c
@@ -35,6 +35,8 @@ struct QEMUBH {
     QEMUBHFunc *cb;
     void *opaque;
     QEMUBH *next;
+    QEMUClock *clock;
+    int64_t time;
     bool scheduled;
     bool idle;
     bool deleted;
@@ -52,6 +54,11 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
     return bh;
 }
 
+static inline int64_t qemu_bh_ready_in(QEMUBH *bh)
+{
+    return (bh->clock) ? (bh->time - qemu_get_clock_ns(bh->clock)) : 0;
+}
+
 int aio_bh_poll(AioContext *ctx)
 {
     QEMUBH *bh, **bhp, *next;
@@ -62,8 +69,10 @@ int aio_bh_poll(AioContext *ctx)
     ret = 0;
     for (bh = ctx->first_bh; bh; bh = next) {
         next = bh->next;
-        if (!bh->deleted && bh->scheduled) {
+        if (!bh->deleted && bh->scheduled && qemu_bh_ready_in(bh) <= 0) {
             bh->scheduled = 0;
+            bh->clock = NULL;
+            bh->time = 0;
             if (!bh->idle)
                 ret = 1;
             bh->idle = 0;
@@ -96,6 +105,8 @@ void qemu_bh_schedule_idle(QEMUBH *bh)
         return;
     bh->scheduled = 1;
     bh->idle = 1;
+    bh->clock = NULL;
+    bh->time = 0;
 }
 
 void qemu_bh_schedule(QEMUBH *bh)
@@ -104,18 +115,39 @@ void qemu_bh_schedule(QEMUBH *bh)
         return;
     bh->scheduled = 1;
     bh->idle = 0;
+    bh->clock = NULL;
+    bh->time = 0;
     aio_notify(bh->ctx);
 }
 
+void qemu_bh_schedule_at(QEMUBH *bh, QEMUClock *clock, int64_t time)
+{
+    /* Allow rescheduling if already scheduled */
+    bh->scheduled = 1;
+    bh->idle = 0;
+    bh->clock = clock;
+    bh->time = time;
+    aio_notify(bh->ctx); /* FIXME: is this right? */
+}
+
+void qemu_bh_schedule_in(QEMUBH *bh, QEMUClock *clock, int64_t time)
+{
+    qemu_bh_schedule_at(bh, clock, qemu_get_clock_ns(clock) + time);
+}
+
 void qemu_bh_cancel(QEMUBH *bh)
 {
     bh->scheduled = 0;
+    bh->clock = NULL;
+    bh->time = 0;
 }
 
 void qemu_bh_delete(QEMUBH *bh)
 {
     bh->scheduled = 0;
     bh->deleted = 1;
+    bh->clock = NULL;
+    bh->time = 0;
 }
 
 static gboolean
@@ -126,13 +158,22 @@ aio_ctx_prepare(GSource *source, gint    *timeout)
 
     for (bh = ctx->first_bh; bh; bh = bh->next) {
         if (!bh->deleted && bh->scheduled) {
-            if (bh->idle) {
+            int64_t wait = qemu_bh_ready_in(bh) / SCALE_MS;
+            if (!wait && bh->idle) {
                 /* idle bottom halves will be polled at least
                  * every 10ms */
-                *timeout = 10;
+                wait = 10;
+            }
+            if (wait) {
+                /* Use the minimum wait across all bottom
+                 * halves */
+                if (*timeout == -1 || *timeout > wait) {
+                    *timeout = wait;
+                }
             } else {
-                /* non-idle bottom halves will be executed
-                 * immediately */
+                /* non-idle bottom halves or timed bottom
+                 * halves which are ready to run will be
+                 * executed immediately */
                 *timeout = 0;
                 return true;
             }
@@ -149,7 +190,7 @@ aio_ctx_check(GSource *source)
     QEMUBH *bh;
 
     for (bh = ctx->first_bh; bh; bh = bh->next) {
-        if (!bh->deleted && bh->scheduled) {
+        if (!bh->deleted && bh->scheduled && qemu_bh_ready_in(bh) <= 0) {
             return true;
        }
     }
diff --git a/include/block/aio.h b/include/block/aio.h
index 1836793..ff26a3b 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -20,6 +20,7 @@
 
 typedef struct BlockDriverAIOCB BlockDriverAIOCB;
 typedef void BlockDriverCompletionFunc(void *opaque, int ret);
+typedef struct QEMUClock QEMUClock;
 
 typedef struct AIOCBInfo {
     void (*cancel)(BlockDriverAIOCB *acb);
@@ -145,6 +146,38 @@ int aio_bh_poll(AioContext *ctx);
 void qemu_bh_schedule(QEMUBH *bh);
 
 /**
+ * qemu_bh_schedule_at: Schedule a bottom half at a future time
+ *
+ * Scheduling a bottom half interrupts the main loop and causes the
+ * execution of the callback that was passed to qemu_bh_new.
+ *
+ * Bottom halves that are scheduled from a bottom half handler are instantly
+ * invoked.  This can create an infinite loop if a bottom half handler
+ * schedules itself.
+ *
+ * @bh: The bottom half to be scheduled.
+ * @clock: The clock to be used
+ * @time: The time in nanoseconds at which the bh becomes runnable
+ */
+void qemu_bh_schedule_at(QEMUBH *bh, QEMUClock *clock, int64_t time);
+
+/**
+ * qemu_bh_schedule_in: Schedule a bottom half after an interval
+ *
+ * Scheduling a bottom half interrupts the main loop and causes the
+ * execution of the callback that was passed to qemu_bh_new.
+ *
+ * Bottom halves that are scheduled from a bottom half handler are instantly
+ * invoked.  This can create an infinite loop if a bottom half handler
+ * schedules itself.
+ *
+ * @bh: The bottom half to be scheduled.
+ * @clock: The clock to be used
+ * @time: The interval in nanoseconds after which the bh becomes runnable
+ */
+void qemu_bh_schedule_in(QEMUBH *bh, QEMUClock *clock, int64_t time);
+
+/**
  * qemu_bh_cancel: Cancel execution of a bottom half.
  *
  * Canceling execution of a bottom half undoes the effect of calls to
diff --git a/tests/test-aio.c b/tests/test-aio.c
index c173870..9352242 100644
--- a/tests/test-aio.c
+++ b/tests/test-aio.c
@@ -12,6 +12,7 @@
 
 #include <glib.h>
 #include "block/aio.h"
+#include "qemu/timer.h"
 
 AioContext *ctx;
 
@@ -124,6 +125,27 @@ static void test_bh_schedule10(void)
     qemu_bh_delete(data.bh);
 }
 
+static void test_bh_schedule_in(void)
+{
+    BHTestData data = { .n = 0 };
+    data.bh = aio_bh_new(ctx, bh_test_cb, &data);
+
+    qemu_bh_schedule_in(data.bh, rt_clock, 1000000000LL);
+    g_assert_cmpint(data.n, ==, 0);
+
+    g_assert(!aio_poll(ctx, false));
+    g_assert_cmpint(data.n, ==, 0);
+
+    sleep(2);
+
+    g_assert(aio_poll(ctx, true));
+    g_assert_cmpint(data.n, ==, 1);
+
+    g_assert(!aio_poll(ctx, false));
+    g_assert_cmpint(data.n, ==, 1);
+    qemu_bh_delete(data.bh);
+}
+
 static void test_bh_cancel(void)
 {
     BHTestData data = { .n = 0 };
@@ -407,6 +429,27 @@ static void test_source_bh_schedule10(void)
     qemu_bh_delete(data.bh);
 }
 
+static void test_source_bh_schedule_in(void)
+{
+    BHTestData data = { .n = 0 };
+    data.bh = aio_bh_new(ctx, bh_test_cb, &data);
+
+    qemu_bh_schedule_in(data.bh, rt_clock, 1000000000LL);
+    g_assert_cmpint(data.n, ==, 0);
+
+    g_assert(g_main_context_iteration(NULL, true));
+    g_assert_cmpint(data.n, ==, 0);
+
+    sleep(2);
+
+    g_assert(g_main_context_iteration(NULL, true));
+    g_assert_cmpint(data.n, ==, 1);
+
+    g_assert(!g_main_context_iteration(NULL, false));
+    g_assert_cmpint(data.n, ==, 1);
+    qemu_bh_delete(data.bh);
+}
+
 static void test_source_bh_cancel(void)
 {
     BHTestData data = { .n = 0 };
@@ -628,6 +671,8 @@ int main(int argc, char **argv)
 {
     GSource *src;
 
+    init_clocks();
+
     ctx = aio_context_new();
     src = aio_get_g_source(ctx);
     g_source_attach(src, NULL);
@@ -639,6 +684,7 @@ int main(int argc, char **argv)
     g_test_add_func("/aio/notify",                  test_notify);
     g_test_add_func("/aio/bh/schedule",             test_bh_schedule);
     g_test_add_func("/aio/bh/schedule10",           test_bh_schedule10);
+    g_test_add_func("/aio/bh/schedule-in",          test_bh_schedule_in);
     g_test_add_func("/aio/bh/cancel",               test_bh_cancel);
     g_test_add_func("/aio/bh/delete",               test_bh_delete);
     g_test_add_func("/aio/bh/callback-delete/one",  test_bh_delete_from_cb);
@@ -653,6 +699,7 @@ int main(int argc, char **argv)
     g_test_add_func("/aio-gsource/flush",                   test_source_flush);
     g_test_add_func("/aio-gsource/bh/schedule",             
test_source_bh_schedule);
     g_test_add_func("/aio-gsource/bh/schedule10",           
test_source_bh_schedule10);
+    g_test_add_func("/aio-gsource/bh/schedule-in",          
test_source_bh_schedule_in);
     g_test_add_func("/aio-gsource/bh/cancel",               
test_source_bh_cancel);
     g_test_add_func("/aio-gsource/bh/delete",               
test_source_bh_delete);
     g_test_add_func("/aio-gsource/bh/callback-delete/one",  
test_source_bh_delete_from_cb);
-- 
1.7.9.5