[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH 07/17] throttle-groups: do not use qemu_co_enter_next
From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 07/17] throttle-groups: do not use qemu_co_enter_next
Date: Thu, 20 Apr 2017 14:00:48 +0200
Prepare for removing this function; always restart throttled requests
from coroutine context. This will matter when restarting throttled
requests will have to acquire a CoMutex.
Signed-off-by: Paolo Bonzini <address@hidden>
---
block/throttle-groups.c | 65 +++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 58 insertions(+), 7 deletions(-)
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index 69bfbd4..d66bf62 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -260,6 +260,18 @@ static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
return must_wait;
}
+/* Start the next pending I/O request for a BlockBackend.
+ *
+ * @blk: the current BlockBackend
+ * @is_write: the type of operation (read/write)
+ */
+static bool throttle_group_co_restart_queue(BlockBackend *blk, bool is_write)
+{
+ BlockBackendPublic *blkp = blk_get_public(blk);
+
+ return qemu_co_queue_next(&blkp->throttled_reqs[is_write]);
+}
+
/* Look for the next pending I/O request and schedule it.
*
* This assumes that tg->lock is held.
@@ -287,7 +299,7 @@ static void schedule_next_request(BlockBackend *blk, bool is_write)
if (!must_wait) {
/* Give preference to requests from the current blk */
if (qemu_in_coroutine() &&
- qemu_co_queue_next(&blkp->throttled_reqs[is_write])) {
+ throttle_group_co_restart_queue(blk, is_write)) {
token = blk;
} else {
ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
@@ -340,18 +352,57 @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
qemu_mutex_unlock(&tg->lock);
}
-void throttle_group_restart_blk(BlockBackend *blk)
+typedef struct {
+ BlockBackend *blk;
+ bool is_write;
+ int ret;
+} RestartData;
+
+static void throttle_group_restart_queue_entry(void *opaque)
{
- BlockBackendPublic *blkp = blk_get_public(blk);
+ RestartData *data = opaque;
+
+ data->ret = throttle_group_co_restart_queue(data->blk, data->is_write);
+}
+
+static int throttle_group_restart_queue(BlockBackend *blk, bool is_write)
+{
+ Coroutine *co;
+ RestartData rd = {
+ .blk = blk,
+ .is_write = is_write
+ };
+
+ aio_context_acquire(blk_get_aio_context(blk));
+ co = qemu_coroutine_create(throttle_group_restart_queue_entry, &rd);
+ /* The request doesn't start until after throttle_group_restart_queue_entry
+ * returns, so the coroutine cannot yield.
+ */
+ qemu_coroutine_enter(co);
+ aio_context_release(blk_get_aio_context(blk));
+ return rd.ret;
+}
+
+static void throttle_group_restart_blk_entry(void *opaque)
+{
+ BlockBackend *blk = opaque;
int i;
for (i = 0; i < 2; i++) {
- while (qemu_co_enter_next(&blkp->throttled_reqs[i])) {
+ while (throttle_group_co_restart_queue(blk, i)) {
;
}
}
}
+void throttle_group_restart_blk(BlockBackend *blk)
+{
+ Coroutine *co;
+
+ co = qemu_coroutine_create(throttle_group_restart_blk_entry, blk);
+ qemu_coroutine_enter(co);
+}
+
/* Update the throttle configuration for a particular group. Similar
* to throttle_config(), but guarantees atomicity within the
* throttling group.
@@ -376,8 +427,8 @@ void throttle_group_config(BlockBackend *blk, ThrottleConfig *cfg)
throttle_config(ts, tt, cfg);
qemu_mutex_unlock(&tg->lock);
- qemu_co_enter_next(&blkp->throttled_reqs[0]);
- qemu_co_enter_next(&blkp->throttled_reqs[1]);
+ throttle_group_restart_queue(blk, 0);
+ throttle_group_restart_queue(blk, 1);
}
/* Get the throttle configuration from a particular group. Similar to
@@ -417,7 +468,7 @@ static void timer_cb(BlockBackend *blk, bool is_write)
/* Run the request that was waiting for this timer */
aio_context_acquire(blk_get_aio_context(blk));
- empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);
+ empty_queue = !throttle_group_restart_queue(blk, is_write);
aio_context_release(blk_get_aio_context(blk));
/* If the request queue was empty then we have to take care of
--
2.9.3
- [Qemu-devel] [PATCH for 2.10 00/17] Block layer thread safety, part 1, Paolo Bonzini, 2017/04/20
- [Qemu-devel] [PATCH 07/17] throttle-groups: do not use qemu_co_enter_next, Paolo Bonzini <=
- [Qemu-devel] [PATCH 06/17] block: access io_plugged with atomic ops, Paolo Bonzini, 2017/04/20
- [Qemu-devel] [PATCH 05/17] block: access wakeup with atomic ops, Paolo Bonzini, 2017/04/20
- [Qemu-devel] [PATCH 11/17] block: access write_gen with atomics, Paolo Bonzini, 2017/04/20
- [Qemu-devel] [PATCH 01/17] block: access copy_on_read with atomic ops, Paolo Bonzini, 2017/04/20
- [Qemu-devel] [PATCH 13/17] coroutine-lock: introduce qemu_co_mutex_lock_unlock, Paolo Bonzini, 2017/04/20
- [Qemu-devel] [PATCH 09/17] util: add stats64 module, Paolo Bonzini, 2017/04/20
- [Qemu-devel] [PATCH 14/17] block: optimize access to reqs_lock, Paolo Bonzini, 2017/04/20
- [Qemu-devel] [PATCH 03/17] block: access io_limits_disabled with atomic ops, Paolo Bonzini, 2017/04/20
- [Qemu-devel] [PATCH 02/17] block: access quiesce_counter with atomic ops, Paolo Bonzini, 2017/04/20
- [Qemu-devel] [PATCH 04/17] block: access serialising_in_flight with atomic ops, Paolo Bonzini, 2017/04/20