[Qemu-block] [PATCH 02/35] WIP: coroutine: manually tag the fast-paths


From: Marc-André Lureau
Subject: [Qemu-block] [PATCH 02/35] WIP: coroutine: manually tag the fast-paths
Date: Wed, 5 Jul 2017 00:03:13 +0200

Some functions can run in both regular and coroutine context. When such
a function knows it is already executing in a coroutine, it may call
coroutine functions directly; tag these fast paths explicitly.
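
The pattern tagged in every hunk below reduces to the following sketch.
co_role_acquire()/co_role_release() are introduced earlier in this series
and are not defined in this patch, so they are stubbed out here as no-op
placeholders, and the example_* names are purely illustrative; only the
shape of the fast-path tagging is meaningful.

#include "qemu/osdep.h"
#include "qemu/coroutine.h"

/* Stand-ins for the role-tagging markers from earlier in the series,
 * reduced to no-ops only so this sketch stands alone. */
#ifndef co_role_acquire
#define co_role_acquire(role) ((void)0)
#define co_role_release(role) ((void)0)
#endif

/* The coroutine entry point, annotated coroutine_fn as usual. */
static void coroutine_fn example_co_entry(void *opaque)
{
    /* coroutine-only work goes here */
}

/* A mixed function, callable from both regular and coroutine context. */
static void example_mixed(void *opaque)
{
    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context: call the entry
         * point directly and tag the temporary role switch. */
        co_role_acquire(_coroutine_fn);
        example_co_entry(opaque);
        co_role_release(_coroutine_fn);
    } else {
        /* Slow path: spawn a coroutine for the entry point. */
        Coroutine *co = qemu_coroutine_create(example_co_entry, opaque);
        qemu_coroutine_enter(co);
    }
}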

Signed-off-by: Marc-André Lureau <address@hidden>
---
 block.c                 |  2 ++
 block/block-backend.c   |  2 ++
 block/io.c              | 16 +++++++++++++++-
 block/sheepdog.c        |  2 ++
 block/throttle-groups.c | 10 ++++++++--
 migration/rdma.c        |  2 ++
 6 files changed, 31 insertions(+), 3 deletions(-)

diff --git a/block.c b/block.c
index 694396281b..b08c006da4 100644
--- a/block.c
+++ b/block.c
@@ -443,7 +443,9 @@ int bdrv_create(BlockDriver *drv, const char* filename,
 
     if (qemu_in_coroutine()) {
         /* Fast-path if already in coroutine context */
+        co_role_acquire(_coroutine_fn);
         bdrv_create_co_entry(&cco);
+        co_role_release(_coroutine_fn);
     } else {
         co = qemu_coroutine_create(bdrv_create_co_entry, &cco);
         qemu_coroutine_enter(co);
diff --git a/block/block-backend.c b/block/block-backend.c
index 0df3457a09..56fc0a4d1e 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -1072,7 +1072,9 @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
 
     if (qemu_in_coroutine()) {
         /* Fast-path if already in coroutine context */
+        co_role_acquire(_coroutine_fn);
         co_entry(&rwco);
+        co_role_release(_coroutine_fn);
     } else {
         Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
         bdrv_coroutine_enter(blk_bs(blk), co);
diff --git a/block/io.c b/block/io.c
index 2de7c77983..14b88c8609 100644
--- a/block/io.c
+++ b/block/io.c
@@ -229,7 +229,9 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
 void bdrv_drained_begin(BlockDriverState *bs)
 {
     if (qemu_in_coroutine()) {
+        co_role_acquire(_coroutine_fn);
         bdrv_co_yield_to_drain(bs);
+        co_role_release(_coroutine_fn);
         return;
     }
 
@@ -616,7 +618,9 @@ static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
 
     if (qemu_in_coroutine()) {
         /* Fast-path if already in coroutine context */
+        co_role_acquire(_coroutine_fn);
         bdrv_rw_co_entry(&rwco);
+        co_role_release(_coroutine_fn);
     } else {
         co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
         bdrv_coroutine_enter(child->bs, co);
@@ -1901,7 +1905,9 @@ int64_t bdrv_get_block_status_above(BlockDriverState *bs,
 
     if (qemu_in_coroutine()) {
         /* Fast-path if already in coroutine context */
+        co_role_acquire(_coroutine_fn);
         bdrv_get_block_status_above_co_entry(&data);
+        co_role_release(_coroutine_fn);
     } else {
         co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
                                    &data);
@@ -2027,7 +2033,11 @@ bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                 bool is_read)
 {
     if (qemu_in_coroutine()) {
-        return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
+        int ret;
+        co_role_acquire(_coroutine_fn);
+        ret = bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
+        co_role_release(_coroutine_fn);
+        return ret;
     } else {
         BdrvVmstateCo data = {
             .bs         = bs,
@@ -2259,7 +2269,9 @@ int bdrv_flush(BlockDriverState *bs)
 
     if (qemu_in_coroutine()) {
         /* Fast-path if already in coroutine context */
+        co_role_acquire(_coroutine_fn);
         bdrv_flush_co_entry(&flush_co);
+        co_role_release(_coroutine_fn);
     } else {
         co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
         bdrv_coroutine_enter(bs, co);
@@ -2406,7 +2418,9 @@ int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
 
     if (qemu_in_coroutine()) {
         /* Fast-path if already in coroutine context */
+        co_role_acquire(_coroutine_fn);
         bdrv_pdiscard_co_entry(&rwco);
+        co_role_release(_coroutine_fn);
     } else {
         co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
         bdrv_coroutine_enter(bs, co);
diff --git a/block/sheepdog.c b/block/sheepdog.c
index 08d7b11e9d..83bc43dde4 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -726,7 +726,9 @@ static int do_req(int sockfd, BlockDriverState *bs, SheepdogReq *hdr,
     };
 
     if (qemu_in_coroutine()) {
+        co_role_acquire(_coroutine_fn);
         do_co_req(&srco);
+        co_role_release(_coroutine_fn);
     } else {
         co = qemu_coroutine_create(do_co_req, &srco);
         if (bs) {
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index da2b490c38..8778f78965 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -304,9 +304,15 @@ static void schedule_next_request(BlockBackend *blk, bool is_write)
 
     /* If it doesn't have to wait, queue it for immediate execution */
     if (!must_wait) {
+        bool handled = false;
+
+        if (qemu_in_coroutine()) {
+            co_role_acquire(_coroutine_fn);
+            handled = throttle_group_co_restart_queue(blk, is_write);
+            co_role_release(_coroutine_fn);
+        }
         /* Give preference to requests from the current blk */
-        if (qemu_in_coroutine() &&
-            throttle_group_co_restart_queue(blk, is_write)) {
+        if (handled) {
             token = blk;
         } else {
             ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
diff --git a/migration/rdma.c b/migration/rdma.c
index c6bc607a03..8c00d4d74c 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -1518,7 +1518,9 @@ static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested,
          * so don't yield unless we know we're running inside of a coroutine.
          */
         if (rdma->migration_started_on_destination) {
+            co_role_acquire(_coroutine_fn);
             yield_until_fd_readable(rdma->comp_channel->fd);
+            co_role_release(_coroutine_fn);
         }
 
         if (ibv_get_cq_event(rdma->comp_channel, &cq, &cq_ctx)) {
-- 
2.13.1.395.gf7b71de06



