[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL 08/15] block-backend: drop blk_prw, use block-coroutine-wrapper
From: |
Eric Blake |
Subject: |
[PULL 08/15] block-backend: drop blk_prw, use block-coroutine-wrapper |
Date: |
Fri, 15 Oct 2021 16:10:03 -0500 |
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Let's drop hand-made coroutine wrappers and use coroutine wrapper
generation like in block/io.c.
Now, blk_foo() functions are written in same way as blk_co_foo() ones,
but wrap blk_do_foo() instead of blk_co_do_foo().
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20211006131718.214235-8-vsementsov@virtuozzo.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
[eblake: spelling fix]
Signed-off-by: Eric Blake <eblake@redhat.com>
---
block/coroutines.h | 30 ++++++++
block/block-backend.c | 155 ++++++++++++++++--------------------------
2 files changed, 90 insertions(+), 95 deletions(-)
diff --git a/block/coroutines.h b/block/coroutines.h
index 35a6c4985782..c8c14a29c834 100644
--- a/block/coroutines.h
+++ b/block/coroutines.h
@@ -75,4 +75,34 @@ int coroutine_fn
nbd_co_do_establish_connection(BlockDriverState *bs, Error **errp);
+int generated_co_wrapper
+blk_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, BdrvRequestFlags flags);
+int coroutine_fn
+blk_co_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, BdrvRequestFlags flags);
+
+
+int generated_co_wrapper
+blk_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset,
+ BdrvRequestFlags flags);
+int coroutine_fn
+blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset,
+ BdrvRequestFlags flags);
+
+int generated_co_wrapper
+blk_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
+int coroutine_fn
+blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
+
+int generated_co_wrapper
+blk_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes);
+int coroutine_fn
+blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes);
+
+int generated_co_wrapper blk_do_flush(BlockBackend *blk);
+int coroutine_fn blk_co_do_flush(BlockBackend *blk);
+
#endif /* BLOCK_COROUTINES_INT_H */
diff --git a/block/block-backend.c b/block/block-backend.c
index cecac6748593..2e6ccce7ef2d 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -14,6 +14,7 @@
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
+#include "block/coroutines.h"
#include "block/throttle-groups.h"
#include "hw/qdev-core.h"
#include "sysemu/blockdev.h"
@@ -1204,7 +1205,7 @@ static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
-static int coroutine_fn
+int coroutine_fn
blk_co_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
@@ -1249,7 +1250,7 @@ int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
-static int coroutine_fn
+int coroutine_fn
blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags)
@@ -1306,6 +1307,20 @@ int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags);
}
+static int coroutine_fn blk_pwritev_part(BlockBackend *blk, int64_t offset,
+ int64_t bytes,
+                                         QEMUIOVector *qiov, size_t qiov_offset,
+ BdrvRequestFlags flags)
+{
+ int ret;
+
+ blk_inc_in_flight(blk);
+ ret = blk_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags);
+ blk_dec_in_flight(blk);
+
+ return ret;
+}
+
typedef struct BlkRwCo {
BlockBackend *blk;
int64_t offset;
@@ -1314,58 +1329,11 @@ typedef struct BlkRwCo {
BdrvRequestFlags flags;
} BlkRwCo;
-static void blk_read_entry(void *opaque)
-{
- BlkRwCo *rwco = opaque;
- QEMUIOVector *qiov = rwco->iobuf;
-
- rwco->ret = blk_co_do_preadv(rwco->blk, rwco->offset, qiov->size,
- qiov, rwco->flags);
- aio_wait_kick();
-}
-
-static void blk_write_entry(void *opaque)
-{
- BlkRwCo *rwco = opaque;
- QEMUIOVector *qiov = rwco->iobuf;
-
- rwco->ret = blk_co_do_pwritev_part(rwco->blk, rwco->offset, qiov->size,
- qiov, 0, rwco->flags);
- aio_wait_kick();
-}
-
-static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
- int64_t bytes, CoroutineEntry co_entry,
- BdrvRequestFlags flags)
-{
- QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
- BlkRwCo rwco = {
- .blk = blk,
- .offset = offset,
- .iobuf = &qiov,
- .flags = flags,
- .ret = NOT_DONE,
- };
-
- blk_inc_in_flight(blk);
- if (qemu_in_coroutine()) {
- /* Fast-path if already in coroutine context */
- co_entry(&rwco);
- } else {
- Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
- bdrv_coroutine_enter(blk_bs(blk), co);
- BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
- }
- blk_dec_in_flight(blk);
-
- return rwco.ret;
-}
-
int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int bytes, BdrvRequestFlags flags)
{
- return blk_prw(blk, offset, NULL, bytes, blk_write_entry,
- flags | BDRV_REQ_ZERO_WRITE);
+ return blk_pwritev_part(blk, offset, bytes, NULL, 0,
+ flags | BDRV_REQ_ZERO_WRITE);
}
int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
@@ -1510,22 +1478,25 @@ BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
- int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
- if (ret < 0) {
- return ret;
- }
- return count;
+ int ret;
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, count);
+
+ blk_inc_in_flight(blk);
+ ret = blk_do_preadv(blk, offset, count, &qiov, 0);
+ blk_dec_in_flight(blk);
+
+ return ret < 0 ? ret : count;
}
int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
BdrvRequestFlags flags)
{
- int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
- flags);
- if (ret < 0) {
- return ret;
- }
- return count;
+ int ret;
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, count);
+
+ ret = blk_pwritev_part(blk, offset, count, &qiov, 0, flags);
+
+ return ret < 0 ? ret : count;
}
int64_t blk_getlength(BlockBackend *blk)
@@ -1582,7 +1553,7 @@ void blk_aio_cancel_async(BlockAIOCB *acb)
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
-static int coroutine_fn
+int coroutine_fn
blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
blk_wait_while_drained(blk);
@@ -1594,18 +1565,15 @@ blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
return bdrv_co_ioctl(blk_bs(blk), req, buf);
}
-static void blk_ioctl_entry(void *opaque)
-{
- BlkRwCo *rwco = opaque;
- QEMUIOVector *qiov = rwco->iobuf;
-
- rwco->ret = blk_co_do_ioctl(rwco->blk, rwco->offset, qiov->iov[0].iov_base);
- aio_wait_kick();
-}
-
int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
- return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
+ int ret;
+
+ blk_inc_in_flight(blk);
+ ret = blk_do_ioctl(blk, req, buf);
+ blk_dec_in_flight(blk);
+
+ return ret;
}
static void blk_aio_ioctl_entry(void *opaque)
@@ -1625,7 +1593,7 @@ BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
-static int coroutine_fn
+int coroutine_fn
blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes)
{
int ret;
@@ -1669,22 +1637,19 @@ int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset,
return ret;
}
-static void blk_pdiscard_entry(void *opaque)
-{
- BlkRwCo *rwco = opaque;
- QEMUIOVector *qiov = rwco->iobuf;
-
- rwco->ret = blk_co_do_pdiscard(rwco->blk, rwco->offset, qiov->size);
- aio_wait_kick();
-}
-
int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
- return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
+ int ret;
+
+ blk_inc_in_flight(blk);
+ ret = blk_do_pdiscard(blk, offset, bytes);
+ blk_dec_in_flight(blk);
+
+ return ret;
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
-static int coroutine_fn blk_co_do_flush(BlockBackend *blk)
+int coroutine_fn blk_co_do_flush(BlockBackend *blk)
{
blk_wait_while_drained(blk);
@@ -1721,16 +1686,15 @@ int coroutine_fn blk_co_flush(BlockBackend *blk)
return ret;
}
-static void blk_flush_entry(void *opaque)
-{
- BlkRwCo *rwco = opaque;
- rwco->ret = blk_co_do_flush(rwco->blk);
- aio_wait_kick();
-}
-
int blk_flush(BlockBackend *blk)
{
- return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
+ int ret;
+
+ blk_inc_in_flight(blk);
+ ret = blk_do_flush(blk);
+ blk_dec_in_flight(blk);
+
+ return ret;
}
void blk_drain(BlockBackend *blk)
@@ -2224,8 +2188,9 @@ int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
int count)
{
- return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
- BDRV_REQ_WRITE_COMPRESSED);
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, count);
+ return blk_pwritev_part(blk, offset, count, &qiov, 0,
+ BDRV_REQ_WRITE_COMPRESSED);
}
int blk_truncate(BlockBackend *blk, int64_t offset, bool exact,
--
2.31.1
- [PULL 00/15] NBD patches through 2021-10-15, Eric Blake, 2021/10/15
- [PULL 05/15] block-backend: convert blk_co_pdiscard to int64_t bytes, Eric Blake, 2021/10/15
- [PULL 01/15] qcow2: Silence clang -m32 compiler warning, Eric Blake, 2021/10/15
- [PULL 04/15] block-backend: convert blk_co_pwritev_part to int64_t bytes, Eric Blake, 2021/10/15
- [PULL 02/15] block-backend: blk_check_byte_request(): int64_t bytes, Eric Blake, 2021/10/15
- [PULL 06/15] block-backend: rename _do_ helper functions to _co_do_, Eric Blake, 2021/10/15
- [PULL 03/15] block-backend: make blk_co_preadv() 64bit, Eric Blake, 2021/10/15
- [PULL 08/15] block-backend: drop blk_prw, use block-coroutine-wrapper, Eric Blake <=
- [PULL 11/15] block-backend: convert blk_aio_ functions to int64_t bytes parameter, Eric Blake, 2021/10/15
- [PULL 09/15] block-backend: convert blk_foo wrappers to use int64_t bytes parameter, Eric Blake, 2021/10/15
- [PULL 10/15] block-backend: convert blk_co_copy_range to int64_t bytes, Eric Blake, 2021/10/15
- [PULL 07/15] block-coroutine-wrapper.py: support BlockBackend first argument, Eric Blake, 2021/10/15
- [PULL 12/15] block-backend: blk_pread, blk_pwrite: rename count parameter to bytes, Eric Blake, 2021/10/15
- [PULL 14/15] block-backend: fix blk_co_flush prototype to mention coroutine_fn, Eric Blake, 2021/10/15
- [PULL 15/15] block-backend: update blk_co_pwrite() and blk_co_pread() wrappers, Eric Blake, 2021/10/15
- [PULL 13/15] block-backend: drop INT_MAX restriction from blk_check_byte_request(), Eric Blake, 2021/10/15
- Re: [PULL 00/15] NBD patches through 2021-10-15, Richard Henderson, 2021/10/15