[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH 33/40] block: explicitly acquire aiocontext in bottom halves that need it
From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 33/40] block: explicitly acquire aiocontext in bottom halves that need it
Date: Tue, 24 Nov 2015 19:01:24 +0100
Signed-off-by: Paolo Bonzini <address@hidden>
---
async.c | 2 --
block/archipelago.c | 3 +++
block/blkdebug.c | 4 ++++
block/blkverify.c | 3 +++
block/block-backend.c | 4 ++++
block/curl.c | 25 +++++++++++++++++--------
block/gluster.c | 2 ++
block/io.c | 6 ++++++
block/iscsi.c | 6 ++++++
block/linux-aio.c | 7 +++++++
block/nfs.c | 4 ++++
block/null.c | 4 ++++
block/qed.c | 13 +++++++++++++
block/qed.h | 3 +++
block/rbd.c | 4 ++++
dma-helpers.c | 7 +++++--
hw/block/virtio-blk.c | 2 ++
hw/scsi/scsi-bus.c | 2 ++
thread-pool.c | 2 ++
19 files changed, 91 insertions(+), 12 deletions(-)
diff --git a/async.c b/async.c
index 03fd05a..4c1f658 100644
--- a/async.c
+++ b/async.c
@@ -87,9 +87,7 @@ int aio_bh_poll(AioContext *ctx)
ret = 1;
}
bh->idle = 0;
- aio_context_acquire(ctx);
aio_bh_call(bh);
- aio_context_release(ctx);
}
}
diff --git a/block/archipelago.c b/block/archipelago.c
index 855655c..7f69a3f 100644
--- a/block/archipelago.c
+++ b/block/archipelago.c
@@ -312,9 +312,12 @@ static void qemu_archipelago_complete_aio(void *opaque)
{
AIORequestData *reqdata = (AIORequestData *) opaque;
ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb;
+ AioContext *ctx = bdrv_get_aio_context(aio_cb->common.bs);
qemu_bh_delete(aio_cb->bh);
+ aio_context_acquire(ctx);
aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret);
+ aio_context_release(ctx);
aio_cb->status = 0;
qemu_aio_unref(aio_cb);
diff --git a/block/blkdebug.c b/block/blkdebug.c
index 6860a2b..ba35185 100644
--- a/block/blkdebug.c
+++ b/block/blkdebug.c
@@ -458,8 +458,12 @@ out:
static void error_callback_bh(void *opaque)
{
struct BlkdebugAIOCB *acb = opaque;
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
+
qemu_bh_delete(acb->bh);
+ aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, acb->ret);
+ aio_context_release(ctx);
qemu_aio_unref(acb);
}
diff --git a/block/blkverify.c b/block/blkverify.c
index c5f8e8d..3ff681a 100644
--- a/block/blkverify.c
+++ b/block/blkverify.c
@@ -188,13 +188,16 @@ static BlkverifyAIOCB *blkverify_aio_get(BlockDriverState *bs, bool is_write,
static void blkverify_aio_bh(void *opaque)
{
BlkverifyAIOCB *acb = opaque;
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
qemu_bh_delete(acb->bh);
if (acb->buf) {
qemu_iovec_destroy(&acb->raw_qiov);
qemu_vfree(acb->buf);
}
+ aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, acb->ret);
+ aio_context_release(ctx);
qemu_aio_unref(acb);
}
diff --git a/block/block-backend.c b/block/block-backend.c
index 36ccc9e..8549289 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -637,8 +637,12 @@ int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
static void error_callback_bh(void *opaque)
{
struct BlockBackendAIOCB *acb = opaque;
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
+
qemu_bh_delete(acb->bh);
+ aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, acb->ret);
+ aio_context_release(ctx);
qemu_aio_unref(acb);
}
diff --git a/block/curl.c b/block/curl.c
index 3d7e1cb..17add0a 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -653,10 +653,14 @@ static void curl_readv_bh_cb(void *p)
{
CURLState *state;
int running;
+ int ret = -EINPROGRESS;
CURLAIOCB *acb = p;
- BDRVCURLState *s = acb->common.bs->opaque;
+ BlockDriverState *bs = acb->common.bs;
+ BDRVCURLState *s = bs->opaque;
+ AioContext *ctx = bdrv_get_aio_context(bs);
+ aio_context_acquire(ctx);
qemu_bh_delete(acb->bh);
acb->bh = NULL;
@@ -670,7 +674,7 @@ static void curl_readv_bh_cb(void *p)
qemu_aio_unref(acb);
// fall through
case FIND_RET_WAIT:
- return;
+ goto out;
default:
break;
}
@@ -678,9 +682,8 @@ static void curl_readv_bh_cb(void *p)
// No cache found, so let's start a new request
state = curl_init_state(acb->common.bs, s);
if (!state) {
- acb->common.cb(acb->common.opaque, -EIO);
- qemu_aio_unref(acb);
- return;
+ ret = -EIO;
+ goto out;
}
acb->start = 0;
@@ -694,9 +697,8 @@ static void curl_readv_bh_cb(void *p)
state->orig_buf = g_try_malloc(state->buf_len);
if (state->buf_len && state->orig_buf == NULL) {
curl_clean_state(state);
- acb->common.cb(acb->common.opaque, -ENOMEM);
- qemu_aio_unref(acb);
- return;
+ ret = -ENOMEM;
+ goto out;
}
state->acb[0] = acb;
@@ -709,6 +711,13 @@ static void curl_readv_bh_cb(void *p)
/* Tell curl it needs to kick things off */
curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
+
+out:
+ if (ret != -EINPROGRESS) {
+ acb->common.cb(acb->common.opaque, ret);
+ qemu_aio_unref(acb);
+ }
+ aio_context_release(ctx);
}
static BlockAIOCB *curl_aio_readv(BlockDriverState *bs,
diff --git a/block/gluster.c b/block/gluster.c
index 0857c14..5aa34ea 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -232,7 +232,9 @@ static void qemu_gluster_complete_aio(void *opaque)
qemu_bh_delete(acb->bh);
acb->bh = NULL;
+ aio_context_acquire(acb->aio_context);
qemu_coroutine_enter(acb->coroutine, NULL);
+ aio_context_release(acb->aio_context);
}
/*
diff --git a/block/io.c b/block/io.c
index adc1eab..4b3e2b2 100644
--- a/block/io.c
+++ b/block/io.c
@@ -2036,12 +2036,15 @@ static const AIOCBInfo bdrv_em_aiocb_info = {
static void bdrv_aio_bh_cb(void *opaque)
{
BlockAIOCBSync *acb = opaque;
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
if (!acb->is_write && acb->ret >= 0) {
qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
}
qemu_vfree(acb->bounce);
+ aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, acb->ret);
+ aio_context_release(ctx);
qemu_bh_delete(acb->bh);
acb->bh = NULL;
qemu_aio_unref(acb);
@@ -2117,10 +2120,13 @@ static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
static void bdrv_co_em_bh(void *opaque)
{
BlockAIOCBCoroutine *acb = opaque;
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
assert(!acb->need_bh);
qemu_bh_delete(acb->bh);
+ aio_context_acquire(ctx);
bdrv_co_complete(acb);
+ aio_context_release(ctx);
}
static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
diff --git a/block/iscsi.c b/block/iscsi.c
index 16c3b44..72c9171 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -129,7 +129,9 @@ iscsi_bh_cb(void *p)
g_free(acb->buf);
acb->buf = NULL;
+ aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, acb->status);
+ aio_context_release(ctx);
if (acb->task != NULL) {
scsi_free_scsi_task(acb->task);
@@ -152,9 +154,13 @@ iscsi_schedule_bh(IscsiAIOCB *acb)
static void iscsi_co_generic_bh_cb(void *opaque)
{
struct IscsiTask *iTask = opaque;
+ AioContext *ctx = iTask->iscsilun->aio_context;
+
iTask->complete = 1;
qemu_bh_delete(iTask->bh);
+ aio_context_acquire(ctx);
qemu_coroutine_enter(iTask->co, NULL);
+ aio_context_release(ctx);
}
static void iscsi_retry_timer_expired(void *opaque)
diff --git a/block/linux-aio.c b/block/linux-aio.c
index 88b0520..0e94a86 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -46,6 +46,8 @@ typedef struct {
} LaioQueue;
struct qemu_laio_state {
+ AioContext *aio_context;
+
io_context_t ctx;
EventNotifier e;
@@ -109,6 +111,7 @@ static void qemu_laio_completion_bh(void *opaque)
struct qemu_laio_state *s = opaque;
/* Fetch more completion events when empty */
+ aio_context_acquire(s->aio_context);
if (s->event_idx == s->event_max) {
do {
struct timespec ts = { 0 };
@@ -141,6 +144,8 @@ static void qemu_laio_completion_bh(void *opaque)
if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
ioq_submit(s);
}
+
+ aio_context_release(s->aio_context);
}
static void qemu_laio_completion_cb(EventNotifier *e)
@@ -289,12 +294,14 @@ void laio_detach_aio_context(void *s_, AioContext *old_context)
aio_set_event_notifier(old_context, &s->e, false, NULL);
qemu_bh_delete(s->completion_bh);
+ s->aio_context = NULL;
}
void laio_attach_aio_context(void *s_, AioContext *new_context)
{
struct qemu_laio_state *s = s_;
+ s->aio_context = new_context;
s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
aio_set_event_notifier(new_context, &s->e, false,
qemu_laio_completion_cb);
diff --git a/block/nfs.c b/block/nfs.c
index 910a51e..c86850b 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -103,9 +103,13 @@ static void nfs_co_init_task(NFSClient *client, NFSRPC *task)
static void nfs_co_generic_bh_cb(void *opaque)
{
NFSRPC *task = opaque;
+ AioContext *ctx = task->client->aio_context;
+
task->complete = 1;
qemu_bh_delete(task->bh);
+ aio_context_acquire(ctx);
qemu_coroutine_enter(task->co, NULL);
+ aio_context_release(ctx);
}
static void
diff --git a/block/null.c b/block/null.c
index 7d08323..dd1b170 100644
--- a/block/null.c
+++ b/block/null.c
@@ -117,7 +117,11 @@ static const AIOCBInfo null_aiocb_info = {
static void null_bh_cb(void *opaque)
{
NullAIOCB *acb = opaque;
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
+
+ aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, 0);
+ aio_context_release(ctx);
qemu_bh_delete(acb->bh);
qemu_aio_unref(acb);
}
diff --git a/block/qed.c b/block/qed.c
index 3d6aa07..d128772 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -348,6 +348,16 @@ static void qed_need_check_timer_cb(void *opaque)
bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}
+void qed_acquire(BDRVQEDState *s)
+{
+ aio_context_acquire(bdrv_get_aio_context(s->bs));
+}
+
+void qed_release(BDRVQEDState *s)
+{
+ aio_context_release(bdrv_get_aio_context(s->bs));
+}
+
static void qed_start_need_check_timer(BDRVQEDState *s)
{
trace_qed_start_need_check_timer(s);
@@ -925,6 +935,7 @@ static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
static void qed_aio_complete_bh(void *opaque)
{
QEDAIOCB *acb = opaque;
+ BDRVQEDState *s = acb_to_s(acb);
BlockCompletionFunc *cb = acb->common.cb;
void *user_opaque = acb->common.opaque;
int ret = acb->bh_ret;
@@ -933,7 +944,9 @@ static void qed_aio_complete_bh(void *opaque)
qemu_aio_unref(acb);
/* Invoke callback */
+ qed_acquire(s);
cb(user_opaque, ret);
+ qed_release(s);
}
static void qed_aio_complete(QEDAIOCB *acb, int ret)
diff --git a/block/qed.h b/block/qed.h
index 615e676..106cefe 100644
--- a/block/qed.h
+++ b/block/qed.h
@@ -198,6 +198,9 @@ enum {
*/
typedef void QEDFindClusterFunc(void *opaque, int ret, uint64_t offset, size_t len);
+void qed_acquire(BDRVQEDState *s);
+void qed_release(BDRVQEDState *s);
+
/**
* Generic callback for chaining async callbacks
*/
diff --git a/block/rbd.c b/block/rbd.c
index a60a19d..6206dc3 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -376,6 +376,7 @@ static int qemu_rbd_create(const char *filename, QemuOpts *opts, Error **errp)
static void qemu_rbd_complete_aio(RADOSCB *rcb)
{
RBDAIOCB *acb = rcb->acb;
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
int64_t r;
r = rcb->ret;
@@ -408,7 +409,10 @@ static void qemu_rbd_complete_aio(RADOSCB *rcb)
qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
}
qemu_vfree(acb->bounce);
+
+ aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
+ aio_context_release(ctx);
qemu_aio_unref(acb);
}
diff --git a/dma-helpers.c b/dma-helpers.c
index 4faec5d..68f6f07 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -69,6 +69,7 @@ void qemu_sglist_destroy(QEMUSGList *qsg)
typedef struct {
BlockAIOCB common;
+ AioContext *ctx;
BlockBackend *blk;
BlockAIOCB *acb;
QEMUSGList *sg;
@@ -153,8 +154,7 @@ static void dma_blk_cb(void *opaque, int ret)
if (dbs->iov.size == 0) {
trace_dma_map_wait(dbs);
- dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
- reschedule_dma, dbs);
+ dbs->bh = aio_bh_new(dbs->ctx, reschedule_dma, dbs);
cpu_register_map_client(dbs->bh);
return;
}
@@ -163,8 +163,10 @@ static void dma_blk_cb(void *opaque, int ret)
qemu_iovec_discard_back(&dbs->iov, dbs->iov.size & ~BDRV_SECTOR_MASK);
}
+ aio_context_acquire(dbs->ctx);
dbs->acb = dbs->io_func(dbs->blk, dbs->sector_num, &dbs->iov,
dbs->iov.size / 512, dma_blk_cb, dbs);
+ aio_context_release(dbs->ctx);
assert(dbs->acb);
}
@@ -201,6 +203,7 @@ BlockAIOCB *dma_blk_io(
dbs->acb = NULL;
dbs->blk = blk;
+ dbs->ctx = blk_get_aio_context(blk);
dbs->sg = sg;
dbs->sector_num = sector_num;
dbs->sg_cur_index = 0;
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index d72942e..5c1cb89 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -626,6 +626,7 @@ static void virtio_blk_dma_restart_bh(void *opaque)
s->rq = NULL;
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
while (req) {
VirtIOBlockReq *next = req->next;
virtio_blk_handle_request(req, &mrb);
@@ -635,6 +636,7 @@ static void virtio_blk_dma_restart_bh(void *opaque)
if (mrb.num_reqs) {
virtio_blk_submit_multireq(s->blk, &mrb);
}
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
static void virtio_blk_dma_restart_cb(void *opaque, int running,
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index fd1171e..0d607b8 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -102,6 +102,7 @@ static void scsi_dma_restart_bh(void *opaque)
qemu_bh_delete(s->bh);
s->bh = NULL;
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
scsi_req_ref(req);
if (req->retry) {
@@ -119,6 +120,7 @@ static void scsi_dma_restart_bh(void *opaque)
}
scsi_req_unref(req);
}
+ aio_context_release(blk_get_aio_context(s->conf.blk));
}
void scsi_req_retry(SCSIRequest *req)
diff --git a/thread-pool.c b/thread-pool.c
index 402c778..bffd823 100644
--- a/thread-pool.c
+++ b/thread-pool.c
@@ -165,6 +165,7 @@ static void thread_pool_completion_bh(void *opaque)
ThreadPool *pool = opaque;
ThreadPoolElement *elem, *next;
+ aio_context_acquire(pool->ctx);
restart:
QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
if (elem->state != THREAD_DONE) {
@@ -191,6 +192,7 @@ restart:
qemu_aio_unref(elem);
}
}
+ aio_context_release(pool->ctx);
}
static void thread_pool_cancel(BlockAIOCB *acb)
--
1.8.3.1
- [Qemu-devel] [PATCH 27/40] aio: document locking, (continued)
- [Qemu-devel] [PATCH 27/40] aio: document locking, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 23/40] qemu-thread: optimize QemuLockCnt with futexes on Linux, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 21/40] qemu-thread: introduce QemuLockCnt, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 26/40] aio-win32: remove walking_handlers, protecting AioHandler list with list_lock, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 25/40] aio-posix: remove walking_handlers, protecting AioHandler list with list_lock, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 29/40] quorum: use atomics for rewrite_count, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 30/40] quorum: split quorum_fifo_aio_cb from quorum_aio_cb, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 28/40] aio: push aio_context_acquire/release down to dispatching, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 31/40] qed: introduce qed_aio_start_io and qed_aio_next_io_cb, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 34/40] block: explicitly acquire aiocontext in timers that need it, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 33/40] block: explicitly acquire aiocontext in bottom halves that need it,
Paolo Bonzini <=
- [Qemu-devel] [PATCH 37/40] async: optimize aio_bh_poll, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 36/40] aio: update locking documentation, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 32/40] block: explicitly acquire aiocontext in callbacks that need it, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 38/40] aio-posix: partially inline aio_dispatch into aio_poll, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 35/40] block: explicitly acquire aiocontext in aio callbacks that need it, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 39/40] async: remove unnecessary inc/dec pairs, Paolo Bonzini, 2015/11/24
- [Qemu-devel] [PATCH 40/40] dma-helpers: avoid lock inversion with AioContext, Paolo Bonzini, 2015/11/24
- Re: [Qemu-devel] [RFC PATCH 00/40] Sneak peek of virtio and dataplane changes for 2.6, Christian Borntraeger, 2015/11/26