From: Michael Tokarev
Subject: [Stable-8.0.1 59/59] nbd/server: Fix drained_poll to wake coroutine in right AioContext
Date: Tue, 23 May 2023 13:15:19 +0300
From: Kevin Wolf <kwolf@redhat.com>
nbd_drained_poll() generally runs in the main thread, not whatever
iothread the NBD server coroutine is meant to run in, so it can't
directly reenter the coroutines to wake them up.
The code seems to have the right intention: it specifies the correct
AioContext when it calls qemu_aio_coroutine_enter(). However, this
function doesn't schedule the coroutine to run in that AioContext; it
assumes it is already being called in the home thread of that AioContext.
To fix this, add a new thread-safe qio_channel_wake_read() that can be
called in the main thread to wake up the coroutine in its AioContext,
and use this in nbd_drained_poll().
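
Not part of the patch, but to make the distinction concrete: a minimal
sketch of the two wake-up paths, assuming QEMU's internal coroutine/AIO
APIs. wake_unsafe(), wake_safe() and the "slot" parameter are
illustrative names, not taken from the patch.

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/coroutine.h"
#include "block/aio.h"

/* Enters the coroutine on the CALLING thread; ctx is only recorded as
 * the coroutine's context. This is wrong when called from the main
 * thread for a coroutine that belongs to an iothread -- the bug
 * described above. */
static void wake_unsafe(AioContext *ctx, Coroutine *co)
{
    qemu_aio_coroutine_enter(ctx, co);
}

/* Safe from any thread: atomically claim the coroutine so it is woken
 * at most once, then let aio_co_wake() schedule it into its own home
 * AioContext instead of running it on the current thread. */
static void wake_safe(Coroutine **slot)
{
    Coroutine *co = qatomic_xchg(slot, NULL);

    if (co) {
        aio_co_wake(co);
    }
}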
Cc: qemu-stable@nongnu.org
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20230517152834.277483-3-kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit 7c1f51bf38de8cea4ed5030467646c37b46edeb7)
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
diff --git a/include/io/channel.h b/include/io/channel.h
index 153fbd2904..2b905423a9 100644
--- a/include/io/channel.h
+++ b/include/io/channel.h
@@ -757,6 +757,16 @@ void qio_channel_detach_aio_context(QIOChannel *ioc);
 void coroutine_fn qio_channel_yield(QIOChannel *ioc,
                                     GIOCondition condition);
 
+/**
+ * qio_channel_wake_read:
+ * @ioc: the channel object
+ *
+ * If qio_channel_yield() is currently waiting for the channel to become
+ * readable, interrupt it and reenter immediately. This function is safe to call
+ * from any thread.
+ */
+void qio_channel_wake_read(QIOChannel *ioc);
+
 /**
  * qio_channel_wait:
  * @ioc: the channel object
diff --git a/io/channel.c b/io/channel.c
index a8c7f11649..3c9b7beb65 100644
--- a/io/channel.c
+++ b/io/channel.c
@@ -19,6 +19,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "block/aio-wait.h"
 #include "io/channel.h"
 #include "qapi/error.h"
 #include "qemu/main-loop.h"
@@ -514,7 +515,11 @@ int qio_channel_flush(QIOChannel *ioc,
 static void qio_channel_restart_read(void *opaque)
 {
     QIOChannel *ioc = opaque;
-    Coroutine *co = ioc->read_coroutine;
+    Coroutine *co = qatomic_xchg(&ioc->read_coroutine, NULL);
+
+    if (!co) {
+        return;
+    }
 
     /* Assert that aio_co_wake() reenters the coroutine directly */
     assert(qemu_get_current_aio_context() ==
@@ -525,7 +530,11 @@ static void qio_channel_restart_read(void *opaque)
 static void qio_channel_restart_write(void *opaque)
 {
     QIOChannel *ioc = opaque;
-    Coroutine *co = ioc->write_coroutine;
+    Coroutine *co = qatomic_xchg(&ioc->write_coroutine, NULL);
+
+    if (!co) {
+        return;
+    }
 
     /* Assert that aio_co_wake() reenters the coroutine directly */
     assert(qemu_get_current_aio_context() ==
@@ -568,7 +577,11 @@ void qio_channel_detach_aio_context(QIOChannel *ioc)
 void coroutine_fn qio_channel_yield(QIOChannel *ioc,
                                     GIOCondition condition)
 {
+    AioContext *ioc_ctx = ioc->ctx ?: qemu_get_aio_context();
+
     assert(qemu_in_coroutine());
+    assert(in_aio_context_home_thread(ioc_ctx));
+
     if (condition == G_IO_IN) {
         assert(!ioc->read_coroutine);
         ioc->read_coroutine = qemu_coroutine_self();
@@ -580,18 +593,26 @@ void coroutine_fn qio_channel_yield(QIOChannel *ioc,
     }
     qio_channel_set_aio_fd_handlers(ioc);
     qemu_coroutine_yield();
+    assert(in_aio_context_home_thread(ioc_ctx));
 
     /* Allow interrupting the operation by reentering the coroutine other than
      * through the aio_fd_handlers. */
-    if (condition == G_IO_IN && ioc->read_coroutine) {
-        ioc->read_coroutine = NULL;
+    if (condition == G_IO_IN) {
+        assert(ioc->read_coroutine == NULL);
         qio_channel_set_aio_fd_handlers(ioc);
-    } else if (condition == G_IO_OUT && ioc->write_coroutine) {
-        ioc->write_coroutine = NULL;
+    } else if (condition == G_IO_OUT) {
+        assert(ioc->write_coroutine == NULL);
         qio_channel_set_aio_fd_handlers(ioc);
     }
 }
 
+void qio_channel_wake_read(QIOChannel *ioc)
+{
+    Coroutine *co = qatomic_xchg(&ioc->read_coroutine, NULL);
+    if (co) {
+        aio_co_wake(co);
+    }
+}
+
 static gboolean qio_channel_wait_complete(QIOChannel *ioc,
                                           GIOCondition condition,
diff --git a/nbd/server.c b/nbd/server.c
index 3d8d0d81df..ea47522e8f 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -1599,8 +1599,7 @@ static bool nbd_drained_poll(void *opaque)
              * enter it here so we don't depend on the client to wake it up.
              */
             if (client->recv_coroutine != NULL && client->read_yielding) {
-                qemu_aio_coroutine_enter(exp->common.ctx,
-                                         client->recv_coroutine);
+                qio_channel_wake_read(client->ioc);
             }
 
             return true;
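
For context, the coroutine being woken here parks itself in
qio_channel_yield() via the nbd_read_eof() path. Below is a condensed
sketch of that waiting side (based on nbd/server.c, with the read loop
and error handling trimmed; sketch_read_eof is an illustrative name,
not the literal code):

static ssize_t coroutine_fn sketch_read_eof(NBDClient *client, void *buf,
                                            size_t size, Error **errp)
{
    struct iovec iov = { .iov_base = buf, .iov_len = size };
    ssize_t len;

    len = qio_channel_readv(client->ioc, &iov, 1, errp);
    if (len == QIO_CHANNEL_ERR_BLOCK) {
        client->read_yielding = true;
        /* Parks this coroutine until the fd becomes readable -- or until
         * nbd_drained_poll() interrupts it via qio_channel_wake_read(). */
        qio_channel_yield(client->ioc, G_IO_IN);
        client->read_yielding = false;
        if (client->quiescing) {
            return -EAGAIN;   /* drained section: back off without reading */
        }
    }
    return len;
}

Because qio_channel_wake_read() claims the coroutine pointer with
qatomic_xchg(), the drain path and the fd handler can race without the
coroutine ever being entered twice.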
--
2.39.2