From: Stefan Hajnoczi
Subject: [PULL v2 23/28] block/export: add vhost-user-blk multi-queue support
Date: Thu, 22 Oct 2020 12:27:21 +0100

Allow the number of queues to be configured using --export
vhost-user-blk,num-queues=N. This setting should match the QEMU --device
vhost-user-blk-pci,num-queues=N setting, but QEMU's vhost-user-blk.c lowers
its own value if the vhost-user-blk backend offers fewer queues than QEMU
was configured with.
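
For example, the two sides might be wired up roughly like this (the id,
node-name, chardev, and socket path below are illustrative, not part of
this patch, and the exact --export sub-option spelling is assumed):

  # qemu-storage-daemon side
  --export vhost-user-blk,id=export0,node-name=disk0,addr.type=unix,addr.path=/tmp/vhost-user-blk.sock,num-queues=4

  # QEMU side
  -chardev socket,id=char0,path=/tmp/vhost-user-blk.sock
  -device vhost-user-blk-pci,chardev=char0,num-queues=4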

The vhost-user-blk-server.c code is already capable of multi-queue. All
virtqueue processing runs in the same AioContext. No new locking is
needed.

Add the num-queues=N option and set the VIRTIO_BLK_F_MQ feature bit.
Note that the feature bit only announces the presence of the num_queues
configuration space field. It does not promise that there is more than 1
virtqueue, so we can set it unconditionally.
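
For reference, with the schema change below a multi-queue export could
also be created over QMP along these lines (a sketch; the id, node-name,
and socket path are illustrative):

  { "execute": "block-export-add",
    "arguments": { "type": "vhost-user-blk",
                   "id": "export0",
                   "node-name": "disk0",
                   "addr": { "type": "unix",
                             "path": "/tmp/vhost-user-blk.sock" },
                   "num-queues": 4 } }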

I tested multi-queue by running a random read fio test with numjobs=4 on
an -smp 4 guest. After the benchmark finished, the guest's /proc/interrupts
file showed activity on all 4 virtio-blk MSI-X vectors, and the
/sys/block/vda/mq/ directory showed that Linux blk-mq had 4 queues
configured.
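
A job of roughly this shape reproduces the setup described above (the
exact parameters were not given, so treat this as a sketch):

  fio --name=randread --filename=/dev/vda --direct=1 --rw=randread \
      --bs=4k --ioengine=libaio --iodepth=16 --numjobs=4 \
      --runtime=30 --time_based

  # afterwards, in the guest:
  grep virtio /proc/interrupts
  ls /sys/block/vda/mq/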

An automated test is included in the next commit.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: Markus Armbruster <armbru@redhat.com>
Message-id: 20201001144604.559733-2-stefanha@redhat.com
[Fixed accidental tab characters as suggested by Markus Armbruster
--Stefan]
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 qapi/block-export.json               | 10 +++++++---
 block/export/vhost-user-blk-server.c | 24 ++++++++++++++++++------
 2 files changed, 25 insertions(+), 9 deletions(-)

diff --git a/qapi/block-export.json b/qapi/block-export.json
index 8a4ced817f..480c497690 100644
--- a/qapi/block-export.json
+++ b/qapi/block-export.json
@@ -93,11 +93,15 @@
 #        SocketAddress types are supported. Passed fds must be UNIX domain
 #        sockets.
 # @logical-block-size: Logical block size in bytes. Defaults to 512 bytes.
+# @num-queues: Number of request virtqueues. Must be greater than 0. Defaults
+#              to 1.
 #
 # Since: 5.2
 ##
 { 'struct': 'BlockExportOptionsVhostUserBlk',
-  'data': { 'addr': 'SocketAddress', '*logical-block-size': 'size' } }
+  'data': { 'addr': 'SocketAddress',
+           '*logical-block-size': 'size',
+            '*num-queues': 'uint16'} }
 
 ##
 # @NbdServerAddOptions:
@@ -233,8 +237,8 @@
 { 'union': 'BlockExportOptions',
   'base': { 'type': 'BlockExportType',
             'id': 'str',
-           '*fixed-iothread': 'bool',
-           '*iothread': 'str',
+            '*fixed-iothread': 'bool',
+            '*iothread': 'str',
             'node-name': 'str',
             '*writable': 'bool',
             '*writethrough': 'bool' },
diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c
index 286eb5fb9a..41f4933d6e 100644
--- a/block/export/vhost-user-blk-server.c
+++ b/block/export/vhost-user-blk-server.c
@@ -21,7 +21,7 @@
 #include "util/block-helpers.h"
 
 enum {
-    VHOST_USER_BLK_MAX_QUEUES = 1,
+    VHOST_USER_BLK_NUM_QUEUES_DEFAULT = 1,
 };
 struct virtio_blk_inhdr {
     unsigned char status;
@@ -242,6 +242,7 @@ static uint64_t vu_blk_get_features(VuDev *dev)
                1ull << VIRTIO_BLK_F_DISCARD |
                1ull << VIRTIO_BLK_F_WRITE_ZEROES |
                1ull << VIRTIO_BLK_F_CONFIG_WCE |
+               1ull << VIRTIO_BLK_F_MQ |
                1ull << VIRTIO_F_VERSION_1 |
                1ull << VIRTIO_RING_F_INDIRECT_DESC |
                1ull << VIRTIO_RING_F_EVENT_IDX |
@@ -338,7 +339,9 @@ static void blk_aio_detach(void *opaque)
 
 static void
 vu_blk_initialize_config(BlockDriverState *bs,
-                           struct virtio_blk_config *config, uint32_t blk_size)
+                         struct virtio_blk_config *config,
+                         uint32_t blk_size,
+                         uint16_t num_queues)
 {
     config->capacity = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
     config->blk_size = blk_size;
@@ -346,7 +349,7 @@ vu_blk_initialize_config(BlockDriverState *bs,
     config->seg_max = 128 - 2;
     config->min_io_size = 1;
     config->opt_io_size = 1;
-    config->num_queues = VHOST_USER_BLK_MAX_QUEUES;
+    config->num_queues = num_queues;
     config->max_discard_sectors = 32768;
     config->max_discard_seg = 1;
     config->discard_sector_alignment = config->blk_size >> 9;
@@ -368,6 +371,7 @@ static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
     BlockExportOptionsVhostUserBlk *vu_opts = &opts->u.vhost_user_blk;
     Error *local_err = NULL;
     uint64_t logical_block_size;
+    uint16_t num_queues = VHOST_USER_BLK_NUM_QUEUES_DEFAULT;
 
     vexp->writable = opts->writable;
     vexp->blkcfg.wce = 0;
@@ -385,15 +389,23 @@ static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
     }
     vexp->blk_size = logical_block_size;
     blk_set_guest_block_size(exp->blk, logical_block_size);
+
+    if (vu_opts->has_num_queues) {
+        num_queues = vu_opts->num_queues;
+    }
+    if (num_queues == 0) {
+        error_setg(errp, "num-queues must be greater than 0");
+        return -EINVAL;
+    }
+
     vu_blk_initialize_config(blk_bs(exp->blk), &vexp->blkcfg,
-                               logical_block_size);
+                             logical_block_size, num_queues);
 
     blk_add_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach,
                                  vexp);
 
     if (!vhost_user_server_start(&vexp->vu_server, vu_opts->addr, exp->ctx,
-                                 VHOST_USER_BLK_MAX_QUEUES, &vu_blk_iface,
-                                 errp)) {
+                                 num_queues, &vu_blk_iface, errp)) {
         blk_remove_aio_context_notifier(exp->blk, blk_aio_attached,
                                         blk_aio_detach, vexp);
         return -EADDRNOTAVAIL;
-- 
2.26.2

