qemu-devel


From: Jason Wang
Subject: Re: [PATCH 15/18] vhost-net: control virtqueue support
Date: Thu, 1 Jul 2021 11:03:30 +0800
User-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Thunderbird/78.11.0


On 2021/7/1 1:33 AM, Eugenio Perez Martin wrote:
On Mon, Jun 21, 2021 at 6:18 AM Jason Wang <jasowang@redhat.com> wrote:
In the past we assumed there was no cvq, but this no longer holds
once we need control virtqueue support for vhost-user backends. So
this patch implements control virtqueue support for vhost-net. As
with the datapath, the control virtqueue also needs to be coupled
with a NetClientState. vhost_net_start/stop() are tweaked to accept
the number of datapath queue pairs plus the number of control
virtqueues to start and stop for the vhost device.

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
  hw/net/vhost_net.c      | 43 ++++++++++++++++++++++++++++++-----------
  hw/net/virtio-net.c     |  4 ++--
  include/net/vhost_net.h |  6 ++++--
  3 files changed, 38 insertions(+), 15 deletions(-)
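
To keep the indexing in the hunks below straight: the diff assumes
the data queue pair peers occupy the front of the ncs array and the
control virtqueue peer sits at index n->max_qps, adding one guest
notifier on top of the two per data pair (a reading of the hunks
below, not additional API):

    ncs[0] .. ncs[data_qps - 1]   /* data queue pairs; pair i gets vq index i * 2 */
    ncs[n->max_qps]               /* control virtqueue peer */

    total_notifiers = data_qps * 2 + cvq
    nvhosts         = data_qps + cvq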

diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index ef1370bd92..fe2fd7e3d5 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -311,11 +311,14 @@ static void vhost_net_stop_one(struct vhost_net *net,
  }

  int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
-                    int total_queues)
+                    int data_qps, int cvq)
I can see the convenience of using an int, but maybe it would be
clearer to use a boolean?


I tend to leave this open for future extensions, e.g. we may have more than one cvq.
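
For illustration only (hypothetical, not code from this series): with
a count rather than a bool, the arithmetic in the patch extends to
multiple control virtqueues without another signature change:

    /* hypothetical n_cvqs > 1; this patch itself only ever passes 0 or 1 */
    int total_notifiers = data_qps * 2 + n_cvqs;
    int nvhosts = data_qps + n_cvqs;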



  {
      BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
      VirtioBusState *vbus = VIRTIO_BUS(qbus);
      VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
+    int total_notifiers = data_qps * 2 + cvq;
+    VirtIONet *n = VIRTIO_NET(dev);
+    int nvhosts = data_qps + cvq;
      struct vhost_net *net;
      int r, e, i;
      NetClientState *peer;
@@ -325,9 +328,14 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
          return -ENOSYS;
      }

-    for (i = 0; i < total_queues; i++) {
+    for (i = 0; i < nvhosts; i++) {
+
+        if (i < data_qps) {
+            peer = qemu_get_peer(ncs, i);
+        } else { /* Control Virtqueue */
+            peer = qemu_get_peer(ncs, n->max_qps);
The field max_qps should be max_queues until the next patch, or maybe
we can reorder the commits and then rename the field before this
commit?


You're right, let me re-order the patches.

Thanks



The same comment applies later in this function and in vhost_net_stop.

Thanks!

+        }

-        peer = qemu_get_peer(ncs, i);
          net = get_vhost_net(peer);
          vhost_net_set_vq_index(net, i * 2);

@@ -340,14 +348,18 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
          }
       }

-    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
+    r = k->set_guest_notifiers(qbus->parent, total_notifiers, true);
      if (r < 0) {
          error_report("Error binding guest notifier: %d", -r);
          goto err;
      }

-    for (i = 0; i < total_queues; i++) {
-        peer = qemu_get_peer(ncs, i);
+    for (i = 0; i < nvhosts; i++) {
+        if (i < data_qps) {
+            peer = qemu_get_peer(ncs, i);
+        } else {
+            peer = qemu_get_peer(ncs, n->max_qps);
+        }
          r = vhost_net_start_one(get_vhost_net(peer), dev);

          if (r < 0) {
@@ -371,7 +383,7 @@ err_start:
          peer = qemu_get_peer(ncs , i);
          vhost_net_stop_one(get_vhost_net(peer), dev);
      }
-    e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
+    e = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
      if (e < 0) {
          fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
          fflush(stderr);
@@ -381,18 +393,27 @@ err:
  }

  void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
-                    int total_queues)
+                    int data_qps, int cvq)
  {
      BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
      VirtioBusState *vbus = VIRTIO_BUS(qbus);
      VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
+    VirtIONet *n = VIRTIO_NET(dev);
+    NetClientState *peer;
+    int total_notifiers = data_qps * 2 + cvq;
+    int nvhosts = data_qps + cvq;
      int i, r;

-    for (i = 0; i < total_queues; i++) {
-        vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
+    for (i = 0; i < nvhosts; i++) {
+        if (i < data_qps) {
+            peer = qemu_get_peer(ncs, i);
+        } else {
+            peer = qemu_get_peer(ncs, n->max_qps);
+        }
+        vhost_net_stop_one(get_vhost_net(peer), dev);
      }

-    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
+    r = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
      if (r < 0) {
          fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
          fflush(stderr);
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index bd7958b9f0..614660274c 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -285,14 +285,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
          }

          n->vhost_started = 1;
-        r = vhost_net_start(vdev, n->nic->ncs, queues);
+        r = vhost_net_start(vdev, n->nic->ncs, queues, 0);
          if (r < 0) {
              error_report("unable to start vhost net: %d: "
                           "falling back on userspace virtio", -r);
              n->vhost_started = 0;
          }
      } else {
-        vhost_net_stop(vdev, n->nic->ncs, queues);
+        vhost_net_stop(vdev, n->nic->ncs, queues, 0);
          n->vhost_started = 0;
      }
  }
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index fba40cf695..e656e38af9 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -21,8 +21,10 @@ typedef struct VhostNetOptions {
  uint64_t vhost_net_get_max_queues(VHostNetState *net);
  struct vhost_net *vhost_net_init(VhostNetOptions *options);

-int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
-void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
+int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
+                    int data_qps, int cvq);
+void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
+                    int data_qps, int cvq);

  void vhost_net_cleanup(VHostNetState *net);

--
2.25.1
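
For context on the new signature: the virtio-net hunk above still
passes cvq = 0. A caller that actually enables the control virtqueue
would presumably gate it on feature negotiation, along these lines (a
sketch under that assumption, not a hunk from this patch):

    /* sketch: pass 1 only when the guest negotiated a control virtqueue */
    int cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ? 1 : 0;

    r = vhost_net_start(vdev, n->nic->ncs, queues, cvq);
    /* ... and symmetrically on stop: */
    vhost_net_stop(vdev, n->nic->ncs, queues, cvq);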




