qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [PATCH v3 4/5] [RFC] libqblock, implementation minor


From: Wenchao Xia
Subject: [Qemu-devel] [PATCH v3 4/5] [RFC] libqblock, implementation minor
Date: Wed, 29 Aug 2012 19:05:47 +0800

  Simply exposed a block.c API, and added an AIO check function.

Signed-off-by: Wenchao Xia <address@hidden>
---
 aio.c      |   95 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 block.c    |    2 +-
 block.h    |    1 +
 qemu-aio.h |    1 +
 4 files changed, 98 insertions(+), 1 deletions(-)

diff --git a/aio.c b/aio.c
index 0a9eb10..dbe0f6f 100644
--- a/aio.c
+++ b/aio.c
@@ -192,3 +192,98 @@ bool qemu_aio_wait(void)
 
     return true;
 }
+
+
+bool qemu_aio_check(void)
+{
+    AioHandler *node;
+    fd_set rdfds, wrfds;
+    int max_fd = -1;
+    int ret;
+    bool busy;
+    struct timeval tv;
+
+    walking_handlers = 1;
+
+    FD_ZERO(&rdfds);
+    FD_ZERO(&wrfds);
+
+    /* fill fd sets */
+    busy = false;
+    QLIST_FOREACH(node, &aio_handlers, node) {
+        /* If there aren't pending AIO operations, don't invoke callbacks.
+         * (Comment inherited from qemu_aio_wait(); here select() is given a
+         * zero timeout, so qemu_aio_check() itself never blocks.)
+         */
+        if (node->io_flush) {
+            if (node->io_flush(node->opaque) == 0) {
+                continue;
+            }
+            busy = true;
+        }
+        if (!node->deleted && node->io_read) {
+            FD_SET(node->fd, &rdfds);
+            max_fd = MAX(max_fd, node->fd + 1);
+        }
+        if (!node->deleted && node->io_write) {
+            FD_SET(node->fd, &wrfds);
+            max_fd = MAX(max_fd, node->fd + 1);
+        }
+    }
+
+    walking_handlers = 0;
+
+    /* No AIO operations?  Get us out of here */
+    if (!busy) {
+        return false;
+    }
+
+    tv.tv_sec = 0;
+    tv.tv_usec = 0;
+    /* zero timeout: poll for pending events without blocking */
+    ret = select(max_fd, &rdfds, &wrfds, NULL, &tv);
+
+    /* if any fds are ready for read or write, dispatch their events */
+    if (ret > 0) {
+        walking_handlers = 1;
+
+        /* we have to walk very carefully in case
+         * qemu_aio_set_fd_handler is called while we're walking */
+        node = QLIST_FIRST(&aio_handlers);
+        while (node) {
+            AioHandler *tmp;
+
+            if (!node->deleted &&
+                FD_ISSET(node->fd, &rdfds) &&
+                node->io_read) {
+                node->io_read(node->opaque);
+            }
+            if (!node->deleted &&
+                FD_ISSET(node->fd, &wrfds) &&
+                node->io_write) {
+                node->io_write(node->opaque);
+            }
+
+            tmp = node;
+            node = QLIST_NEXT(node, node);
+
+            if (tmp->deleted) {
+                QLIST_REMOVE(tmp, node);
+                g_free(tmp);
+            }
+        }
+
+        walking_handlers = 0;
+    }
+
+    /*
+     * If there are callbacks left that have been queued, we need to call them.
+     * Do not call select in this case, because it is possible that the caller
+     * does not need a complete flush (as is the case for qemu_aio_wait loops).
+     */
+    if (qemu_bh_poll()) {
+        return true;
+    }
+
+    return true;
+}
diff --git a/block.c b/block.c
index b38940b..d30f363 100644
--- a/block.c
+++ b/block.c
@@ -196,7 +196,7 @@ static void bdrv_io_limits_intercept(BlockDriverState *bs,
 }
 
 /* check if the path starts with "<protocol>:" */
-static int path_has_protocol(const char *path)
+int path_has_protocol(const char *path)
 {
     const char *p;
 
diff --git a/block.h b/block.h
index c89590d..3aca2fd 100644
--- a/block.h
+++ b/block.h
@@ -403,4 +403,5 @@ typedef enum {
 #define BLKDBG_EVENT(bs, evt) bdrv_debug_event(bs, evt)
 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event);
 
+int path_has_protocol(const char *path);
 #endif
diff --git a/qemu-aio.h b/qemu-aio.h
index bfdd35f..b34eb16 100644
--- a/qemu-aio.h
+++ b/qemu-aio.h
@@ -66,4 +66,5 @@ int qemu_aio_set_fd_handler(int fd,
                             AioFlushHandler *io_flush,
                             void *opaque);
 
+bool qemu_aio_check(void);
 #endif
-- 
1.7.1





reply via email to

[Prev in Thread] Current Thread [Next in Thread]