[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [PATCH v6 06/18] aio: stop using .io_flush()
From: Jeff Cody
Subject: Re: [Qemu-devel] [PATCH v6 06/18] aio: stop using .io_flush()
Date: Fri, 26 Jul 2013 12:10:01 -0400
User-agent: Mutt/1.5.21 (2010-09-15)
On Thu, Jul 25, 2013 at 05:18:13PM +0200, Stefan Hajnoczi wrote:
> Now that aio_poll() users check their termination condition themselves,
> it is no longer necessary to call .io_flush() handlers.
>
> The behavior of aio_poll() changes as follows:
>
> 1. .io_flush() is no longer invoked and file descriptors are *always*
> monitored. Previously returning 0 from .io_flush() would skip this file
> descriptor.
>
> Due to this change it is essential to check that requests are pending
> before calling qemu_aio_wait(). Failure to do so means we block, for
> example, waiting for an idle iSCSI socket to become readable when there
> are no requests. Currently all qemu_aio_wait()/aio_poll() callers check
> before calling.
>
> 2. aio_poll() now returns true if progress was made (BH or fd handlers
> executed) and false otherwise. Previously it would return true whenever
> 'busy', which means that .io_flush() returned true. The 'busy' concept
> no longer exists so just progress is returned.
>
> Due to this change we need to update tests/test-aio.c which asserts
> aio_poll() return values. Note that QEMU doesn't actually rely on these
> return values so only tests/test-aio.c cares.
>
> Note that ctx->notifier, the EventNotifier fd used for aio_notify(), is
> now handled as a special case. This is a little ugly but maintains
> aio_poll() semantics, i.e. aio_notify() does not count as 'progress' and
> aio_poll() avoids blocking when the user has not set any fd handlers yet.
>
> Patches after this remove .io_flush() handler code until we can finally
> drop the io_flush arguments to aio_set_fd_handler() and friends.
>
> Reviewed-by: Paolo Bonzini <address@hidden>
> Signed-off-by: Stefan Hajnoczi <address@hidden>
> ---
> aio-posix.c | 29 +++++++++--------------------
> aio-win32.c | 34 ++++++++++++++--------------------
> tests/test-aio.c | 10 +++++-----
> 3 files changed, 28 insertions(+), 45 deletions(-)
>
> diff --git a/aio-posix.c b/aio-posix.c
> index b68eccd..7d66048 100644
> --- a/aio-posix.c
> +++ b/aio-posix.c
> @@ -23,7 +23,6 @@ struct AioHandler
> GPollFD pfd;
> IOHandler *io_read;
> IOHandler *io_write;
> - AioFlushHandler *io_flush;
> int deleted;
> int pollfds_idx;
> void *opaque;
> @@ -84,7 +83,6 @@ void aio_set_fd_handler(AioContext *ctx,
> /* Update handler with latest information */
> node->io_read = io_read;
> node->io_write = io_write;
> - node->io_flush = io_flush;
> node->opaque = opaque;
> node->pollfds_idx = -1;
>
> @@ -147,7 +145,11 @@ static bool aio_dispatch(AioContext *ctx)
> (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
> node->io_read) {
> node->io_read(node->opaque);
> - progress = true;
> +
> + /* aio_notify() does not count as progress */
> + if (node->opaque != &ctx->notifier) {
> + progress = true;
> + }
> }
> if (!node->deleted &&
> (revents & (G_IO_OUT | G_IO_ERR)) &&
> @@ -173,7 +175,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
> {
> AioHandler *node;
> int ret;
> - bool busy, progress;
> + bool progress;
>
> progress = false;
>
> @@ -200,20 +202,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
> g_array_set_size(ctx->pollfds, 0);
>
> /* fill pollfds */
> - busy = false;
> QLIST_FOREACH(node, &ctx->aio_handlers, node) {
> node->pollfds_idx = -1;
> -
> - /* If there aren't pending AIO operations, don't invoke callbacks.
> - * Otherwise, if there are no AIO requests, qemu_aio_wait() would
> - * wait indefinitely.
> - */
> - if (!node->deleted && node->io_flush) {
> - if (node->io_flush(node->opaque) == 0) {
> - continue;
> - }
> - busy = true;
> - }
> if (!node->deleted && node->pfd.events) {
> GPollFD pfd = {
> .fd = node->pfd.fd,
> @@ -226,8 +216,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
>
> ctx->walking_handlers--;
>
> - /* No AIO operations? Get us out of here */
> - if (!busy) {
> + /* early return if we only have the aio_notify() fd */
> + if (ctx->pollfds->len == 1) {
> return progress;
> }
>
> @@ -250,6 +240,5 @@ bool aio_poll(AioContext *ctx, bool blocking)
> }
> }
>
> - assert(progress || busy);
> - return true;
> + return progress;
> }
> diff --git a/aio-win32.c b/aio-win32.c
> index 38723bf..4309c16 100644
> --- a/aio-win32.c
> +++ b/aio-win32.c
> @@ -23,7 +23,6 @@
> struct AioHandler {
> EventNotifier *e;
> EventNotifierHandler *io_notify;
> - AioFlushEventNotifierHandler *io_flush;
> GPollFD pfd;
> int deleted;
> QLIST_ENTRY(AioHandler) node;
> @@ -73,7 +72,6 @@ void aio_set_event_notifier(AioContext *ctx,
> }
> /* Update handler with latest information */
> node->io_notify = io_notify;
> - node->io_flush = io_flush;
> }
>
> aio_notify(ctx);
> @@ -96,7 +94,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
> {
> AioHandler *node;
> HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
> - bool busy, progress;
> + bool progress;
> int count;
>
> progress = false;
> @@ -126,7 +124,11 @@ bool aio_poll(AioContext *ctx, bool blocking)
> if (node->pfd.revents && node->io_notify) {
> node->pfd.revents = 0;
> node->io_notify(node->e);
> - progress = true;
> +
> + /* aio_notify() does not count as progress */
> + if (node->opaque != &ctx->notifier) {
> + progress = true;
> + }
> }
>
> tmp = node;
> @@ -147,19 +149,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
> ctx->walking_handlers++;
>
> /* fill fd sets */
> - busy = false;
> count = 0;
> QLIST_FOREACH(node, &ctx->aio_handlers, node) {
> - /* If there aren't pending AIO operations, don't invoke callbacks.
> - * Otherwise, if there are no AIO requests, qemu_aio_wait() would
> - * wait indefinitely.
> - */
> - if (!node->deleted && node->io_flush) {
> - if (node->io_flush(node->e) == 0) {
> - continue;
> - }
> - busy = true;
> - }
> if (!node->deleted && node->io_notify) {
> events[count++] = event_notifier_get_handle(node->e);
> }
> @@ -167,8 +158,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
>
> ctx->walking_handlers--;
>
> - /* No AIO operations? Get us out of here */
> - if (!busy) {
> + /* early return if we only have the aio_notify() fd */
> + if (count == 1) {
> return progress;
> }
Minor point, but could we simplify it a bit if the above 3 lines were
removed, and the following while() loop (not shown in the diff context)
was changed to while (count > 1) instead of while (count > 0)?
I.e.:
- /* No AIO operations? Get us out of here */
- if (!busy) {
- return progress;
- }
- /* wait until next event */
- while (count > 0) {
+ /* wait until next event that is not aio_notify() */
+ while (count > 1) {
This would assume of course that aio_notify() is always first in the
list.
>
> @@ -196,7 +187,11 @@ bool aio_poll(AioContext *ctx, bool blocking)
> event_notifier_get_handle(node->e) == events[ret - WAIT_OBJECT_0] &&
> node->io_notify) {
> node->io_notify(node->e);
> node->io_notify(node->e);
> - progress = true;
> +
> + /* aio_notify() does not count as progress */
> + if (node->opaque != &ctx->notifier) {
> + progress = true;
> + }
^ We could then also drop this part of the patch, too, right?
> }
>
> tmp = node;
> @@ -214,6 +209,5 @@ bool aio_poll(AioContext *ctx, bool blocking)
> events[ret - WAIT_OBJECT_0] = events[--count];
> }
>
> - assert(progress || busy);
> - return true;
> + return progress;
> }
> diff --git a/tests/test-aio.c b/tests/test-aio.c
> index 20bf5e6..1251952 100644
> --- a/tests/test-aio.c
> +++ b/tests/test-aio.c
> @@ -254,7 +254,7 @@ static void test_wait_event_notifier(void)
> EventNotifierTestData data = { .n = 0, .active = 1 };
> event_notifier_init(&data.e, false);
> aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb);
> - g_assert(aio_poll(ctx, false));
> + g_assert(!aio_poll(ctx, false));
> g_assert_cmpint(data.n, ==, 0);
> g_assert_cmpint(data.active, ==, 1);
>
> @@ -279,7 +279,7 @@ static void test_flush_event_notifier(void)
> EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
> event_notifier_init(&data.e, false);
> aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb);
> - g_assert(aio_poll(ctx, false));
> + g_assert(!aio_poll(ctx, false));
> g_assert_cmpint(data.n, ==, 0);
> g_assert_cmpint(data.active, ==, 10);
>
> @@ -313,7 +313,7 @@ static void test_wait_event_notifier_noflush(void)
> /* Until there is an active descriptor, aio_poll may or may not call
> * event_ready_cb. Still, it must not block. */
> event_notifier_set(&data.e);
> - g_assert(!aio_poll(ctx, true));
> + g_assert(aio_poll(ctx, true));
> data.n = 0;
>
> /* An active event notifier forces aio_poll to look at EventNotifiers. */
> @@ -323,13 +323,13 @@ static void test_wait_event_notifier_noflush(void)
> event_notifier_set(&data.e);
> g_assert(aio_poll(ctx, false));
> g_assert_cmpint(data.n, ==, 1);
> - g_assert(aio_poll(ctx, false));
> + g_assert(!aio_poll(ctx, false));
> g_assert_cmpint(data.n, ==, 1);
>
> event_notifier_set(&data.e);
> g_assert(aio_poll(ctx, false));
> g_assert_cmpint(data.n, ==, 2);
> - g_assert(aio_poll(ctx, false));
> + g_assert(!aio_poll(ctx, false));
> g_assert_cmpint(data.n, ==, 2);
>
> event_notifier_set(&dummy.e);
> --
> 1.8.1.4
>
>
- [Qemu-devel] [PATCH v6 00/18] aio: drop io_flush(), Stefan Hajnoczi, 2013/07/25
- [Qemu-devel] [PATCH v6 03/18] dataplane/virtio-blk: check exit conditions before aio_poll(), Stefan Hajnoczi, 2013/07/25
- [Qemu-devel] [PATCH v6 02/18] block: stop relying on io_flush() in bdrv_drain_all(), Stefan Hajnoczi, 2013/07/25
- [Qemu-devel] [PATCH v6 06/18] aio: stop using .io_flush(), Stefan Hajnoczi, 2013/07/25
- Re: [Qemu-devel] [PATCH v6 06/18] aio: stop using .io_flush(), Jeff Cody <=
- Re: [Qemu-devel] [PATCH v6 06/18] aio: stop using .io_flush(), Jeff Cody, 2013/07/26
- Re: [Qemu-devel] [PATCH v6 06/18] aio: stop using .io_flush(), Wenchao Xia, 2013/07/29
- [Qemu-devel] [PATCH v6 01/18] block: ensure bdrv_drain_all() works during bdrv_delete(), Stefan Hajnoczi, 2013/07/25
- [Qemu-devel] [PATCH v6 08/18] block/gluster: drop qemu_gluster_aio_flush_cb(), Stefan Hajnoczi, 2013/07/25
- [Qemu-devel] [PATCH v6 15/18] dataplane/virtio-blk: drop flush_true() and flush_io(), Stefan Hajnoczi, 2013/07/25