[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-block] [PATCH 34/40] block: explicitly acquire aiocontext in timers that need it
From: Paolo Bonzini
Subject: [Qemu-block] [PATCH 34/40] block: explicitly acquire aiocontext in timers that need it
Date: Tue, 24 Nov 2015 19:01:25 +0100
Signed-off-by: Paolo Bonzini <address@hidden>
---
aio-posix.c | 2 --
aio-win32.c | 2 --
block/curl.c | 2 ++
block/iscsi.c | 2 ++
block/null.c | 4 ++++
block/qed.c | 2 ++
block/throttle-groups.c | 2 ++
util/qemu-coroutine-sleep.c | 5 +++++
8 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/aio-posix.c b/aio-posix.c
index 972f3ff..aabc4ae 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -359,9 +359,7 @@ bool aio_dispatch(AioContext *ctx)
qemu_lockcnt_dec(&ctx->list_lock);
/* Run our timers */
- aio_context_acquire(ctx);
progress |= timerlistgroup_run_timers(&ctx->tlg);
- aio_context_release(ctx);
return progress;
}
diff --git a/aio-win32.c b/aio-win32.c
index 1b50019..4479d3f 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -372,9 +372,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
progress |= aio_dispatch_handlers(ctx, event);
} while (count > 0);
- aio_context_acquire(ctx);
progress |= timerlistgroup_run_timers(&ctx->tlg);
- aio_context_release(ctx);
return progress;
}
diff --git a/block/curl.c b/block/curl.c
index 17add0a..c2b6726 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -376,9 +376,11 @@ static void curl_multi_timeout_do(void *arg)
return;
}
+ aio_context_acquire(s->aio_context);
curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
curl_multi_check_completion(s);
+ aio_context_release(s->aio_context);
#else
abort();
#endif
diff --git a/block/iscsi.c b/block/iscsi.c
index 72c9171..411aef8 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -1214,6 +1214,7 @@ static void iscsi_nop_timed_event(void *opaque)
{
IscsiLun *iscsilun = opaque;
+ aio_context_acquire(iscsilun->aio_context);
if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
error_report("iSCSI: NOP timeout. Reconnecting...");
iscsilun->request_timed_out = true;
@@ -1224,6 +1225,7 @@ static void iscsi_nop_timed_event(void *opaque)
timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) +
NOP_INTERVAL);
iscsi_set_events(iscsilun);
+ aio_context_release(iscsilun->aio_context);
}
static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
diff --git a/block/null.c b/block/null.c
index dd1b170..9bddc1b 100644
--- a/block/null.c
+++ b/block/null.c
@@ -129,7 +129,11 @@ static void null_bh_cb(void *opaque)
static void null_timer_cb(void *opaque)
{
NullAIOCB *acb = opaque;
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
+
+ aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, 0);
+ aio_context_release(ctx);
timer_deinit(&acb->timer);
qemu_aio_unref(acb);
}
diff --git a/block/qed.c b/block/qed.c
index d128772..17777d1 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -342,10 +342,12 @@ static void qed_need_check_timer_cb(void *opaque)
trace_qed_need_check_timer_cb(s);
+ qed_acquire(s);
qed_plug_allocating_write_reqs(s);
/* Ensure writes are on disk before clearing flag */
bdrv_aio_flush(s->bs, qed_clear_need_check, s);
+ qed_release(s);
}
void qed_acquire(BDRVQEDState *s)
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index 13b5baa..cdb819a 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -370,7 +370,9 @@ static void timer_cb(BlockDriverState *bs, bool is_write)
qemu_mutex_unlock(&tg->lock);
/* Run the request that was waiting for this timer */
+ aio_context_acquire(bdrv_get_aio_context(bs));
empty_queue = !qemu_co_enter_next(&bs->throttled_reqs[is_write]);
+ aio_context_release(bdrv_get_aio_context(bs));
/* If the request queue was empty then we have to take care of
* scheduling the next one */
diff --git a/util/qemu-coroutine-sleep.c b/util/qemu-coroutine-sleep.c
index b35db56..6e07343 100644
--- a/util/qemu-coroutine-sleep.c
+++ b/util/qemu-coroutine-sleep.c
@@ -18,13 +18,17 @@
typedef struct CoSleepCB {
QEMUTimer *ts;
Coroutine *co;
+ AioContext *ctx;
} CoSleepCB;
static void co_sleep_cb(void *opaque)
{
CoSleepCB *sleep_cb = opaque;
+ AioContext *ctx = sleep_cb->ctx;
+ aio_context_acquire(ctx);
qemu_coroutine_enter(sleep_cb->co, NULL);
+ aio_context_release(ctx);
}
void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type,
@@ -32,6 +36,7 @@ void coroutine_fn co_aio_sleep_ns(AioContext *ctx,
QEMUClockType type,
{
CoSleepCB sleep_cb = {
.co = qemu_coroutine_self(),
+ .ctx = ctx,
};
sleep_cb.ts = aio_timer_new(ctx, type, SCALE_NS, co_sleep_cb, &sleep_cb);
timer_mod(sleep_cb.ts, qemu_clock_get_ns(type) + ns);
--
1.8.3.1
- [Qemu-block] [PATCH 21/40] qemu-thread: introduce QemuLockCnt, (continued)
- [Qemu-block] [PATCH 21/40] qemu-thread: introduce QemuLockCnt, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 23/40] qemu-thread: optimize QemuLockCnt with futexes on Linux, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 24/40] aio: tweak walking in dispatch phase, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 26/40] aio-win32: remove walking_handlers, protecting AioHandler list with list_lock, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 27/40] aio: document locking, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 25/40] aio-posix: remove walking_handlers, protecting AioHandler list with list_lock, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 29/40] quorum: use atomics for rewrite_count, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 30/40] quorum: split quorum_fifo_aio_cb from quorum_aio_cb, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 28/40] aio: push aio_context_acquire/release down to dispatching, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 31/40] qed: introduce qed_aio_start_io and qed_aio_next_io_cb, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 34/40] block: explicitly acquire aiocontext in timers that need it, Paolo Bonzini <=
- [Qemu-block] [PATCH 33/40] block: explicitly acquire aiocontext in bottom halves that need it, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 32/40] block: explicitly acquire aiocontext in callbacks that need it, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 37/40] async: optimize aio_bh_poll, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 36/40] aio: update locking documentation, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 35/40] block: explicitly acquire aiocontext in aio callbacks that need it, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 38/40] aio-posix: partially inline aio_dispatch into aio_poll, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 40/40] dma-helpers: avoid lock inversion with AioContext, Paolo Bonzini, 2015/11/24
- [Qemu-block] [PATCH 39/40] async: remove unnecessary inc/dec pairs, Paolo Bonzini, 2015/11/24
- Re: [Qemu-block] [Qemu-devel] [RFC PATCH 00/40] Sneak peek of virtio and dataplane changes for 2.6, Christian Borntraeger, 2015/11/26