From: Kevin Wolf
Subject: [Qemu-block] [PULL 14/76] AioContext: acquire/release AioContext during aio_poll
Date: Tue, 28 Apr 2015 16:59:56 +0200

From: Paolo Bonzini <address@hidden>

This is the first step in pushing acquire/release down from the callers
into aio_poll() itself, and will eventually let rfifolock drop its
contention callback feature.

Signed-off-by: Paolo Bonzini <address@hidden>
Reviewed-by: Stefan Hajnoczi <address@hidden>
Message-id: address@hidden
Signed-off-by: Stefan Hajnoczi <address@hidden>
Signed-off-by: Kevin Wolf <address@hidden>
---
 aio-posix.c         |  9 +++++++++
 aio-win32.c         |  8 ++++++++
 include/block/aio.h | 13 +++++++------
 3 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/aio-posix.c b/aio-posix.c
index 296cd9b..4abec38 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -238,6 +238,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     bool progress;
     int64_t timeout;
 
+    aio_context_acquire(ctx);
     was_dispatching = ctx->dispatching;
     progress = false;
 
@@ -267,7 +268,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
     timeout = blocking ? aio_compute_timeout(ctx) : 0;
 
     /* wait until next event */
+    if (timeout) {
+        aio_context_release(ctx);
+    }
     ret = qemu_poll_ns((GPollFD *)pollfds, npfd, timeout);
+    if (timeout) {
+        aio_context_acquire(ctx);
+    }
 
     /* if we have any readable fds, dispatch event */
     if (ret > 0) {
@@ -286,5 +293,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     }
 
     aio_set_dispatching(ctx, was_dispatching);
+    aio_context_release(ctx);
+
     return progress;
 }
diff --git a/aio-win32.c b/aio-win32.c
index e6f4ced..233d8f5 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -283,6 +283,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     int count;
     int timeout;
 
+    aio_context_acquire(ctx);
     have_select_revents = aio_prepare(ctx);
     if (have_select_revents) {
         blocking = false;
@@ -323,7 +324,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
         timeout = blocking
             ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
+        if (timeout) {
+            aio_context_release(ctx);
+        }
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
+        if (timeout) {
+            aio_context_acquire(ctx);
+        }
         aio_set_dispatching(ctx, true);
 
         if (first && aio_bh_poll(ctx)) {
@@ -349,5 +356,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
     progress |= timerlistgroup_run_timers(&ctx->tlg);
 
     aio_set_dispatching(ctx, was_dispatching);
+    aio_context_release(ctx);
     return progress;
 }
diff --git a/include/block/aio.h b/include/block/aio.h
index 0dc7a25..d2bb423 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -118,13 +118,14 @@ void aio_context_ref(AioContext *ctx);
 void aio_context_unref(AioContext *ctx);
 
 /* Take ownership of the AioContext.  If the AioContext will be shared between
- * threads, a thread must have ownership when calling aio_poll().
+ * threads, and a thread does not want to be interrupted, it will have to
+ * take ownership around calls to aio_poll().  Otherwise, aio_poll()
+ * automatically takes care of calling aio_context_acquire and
+ * aio_context_release.
  *
- * Note that multiple threads calling aio_poll() means timers, BHs, and
- * callbacks may be invoked from a different thread than they were registered
- * from.  Therefore, code must use AioContext acquire/release or use
- * fine-grained synchronization to protect shared state if other threads will
- * be accessing it simultaneously.
+ * Access to timers and BHs from a thread that has not acquired AioContext
+ * is possible.  Access to callbacks for now must be done while the AioContext
+ * is owned by the thread (FIXME).
  */
 void aio_context_acquire(AioContext *ctx);
 
-- 
1.8.3.1
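
For context on what this changes for callers, below is a minimal sketch of a
dedicated event-loop thread before and after the patch. The function names,
loop structure, and the "stopping" flag are hypothetical illustrations of the
usage implied by the updated block/aio.h comment; they are not part of this
patch or of QEMU's actual iothread code.

#include <stdbool.h>
#include "block/aio.h"   /* AioContext, aio_poll, aio_context_acquire/release */

/* Hypothetical flag telling the event loop thread to exit. */
static bool stopping;

/* Before this patch: a thread sharing the AioContext had to take
 * ownership around every aio_poll() call itself. */
static void event_loop_before(AioContext *ctx)
{
    while (!stopping) {
        aio_context_acquire(ctx);
        aio_poll(ctx, true);        /* blocking poll, lock held */
        aio_context_release(ctx);
    }
}

/* After this patch: aio_poll() acquires the context on entry,
 * drops it around the blocking wait (qemu_poll_ns on POSIX,
 * WaitForMultipleObjects on Windows), and releases it again before
 * returning, so a plain loop suffices unless the thread must not
 * be interrupted. */
static void event_loop_after(AioContext *ctx)
{
    while (!stopping) {
        aio_poll(ctx, true);
    }
}

Note the release/re-acquire around the blocking wait in the diff above: while
this thread sleeps, another thread can acquire the context and make progress,
which is why the updated header comment tells threads that must not be
interrupted to keep taking ownership around aio_poll() explicitly.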




reply via email to

[Prev in Thread] Current Thread [Next in Thread]