qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [RFC v1 2/4] vhost-vdpa: introduce vhost-vdpa net client


From: Jason Wang
Subject: Re: [RFC v1 2/4] vhost-vdpa: introduce vhost-vdpa net client
Date: Tue, 21 Apr 2020 11:40:01 +0800
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Thunderbird/60.9.0


On 2020/4/20 下午5:32, Cindy Lu wrote:
This patch set introduces a new net client type: vhost-vdpa.
vhost-vdpa net client will set up a vDPA device which is svhostdevpecified


typo.


by a "vhostdev" parameter.

Author: Tiwei Bie


Please keep the Signed-off-by tags (sobs) from the original patch.


Signed-off-by: Cindy Lu <address@hidden>
---
  include/net/vhost-vdpa.h |  18 ++++
  include/net/vhost_net.h  |   1 +
  net/Makefile.objs        |   2 +-
  net/clients.h            |   2 +
  net/net.c                |   1 +
  net/vhost-vdpa.c         | 211 +++++++++++++++++++++++++++++++++++++++
  qapi/net.json            |  21 +++-
  7 files changed, 253 insertions(+), 3 deletions(-)
  create mode 100644 include/net/vhost-vdpa.h
  create mode 100644 net/vhost-vdpa.c


I think this patch should come after patch 3.

And it's better to make this a module, like vhost-user.



diff --git a/include/net/vhost-vdpa.h b/include/net/vhost-vdpa.h
new file mode 100644
index 0000000000..9ddd538dad
--- /dev/null
+++ b/include/net/vhost-vdpa.h
@@ -0,0 +1,18 @@
+/*
+ * vhost-vdpa.h
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.


2020 and please add Red Hat here as well.


+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef VHOST_VDPA_H
+#define VHOST_VDPA_H
+
+struct vhost_net;
+struct vhost_net *vhost_vdpa_get_vhost_net(NetClientState *nc);
+uint64_t vhost_vdpa_get_acked_features(NetClientState *nc);
+
+#endif /* VHOST_VDPA_H */
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index 77e47398c4..6f3a624cf7 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -39,5 +39,6 @@ int vhost_set_vring_enable(NetClientState * nc, int enable);
  uint64_t vhost_net_get_acked_features(VHostNetState *net);
int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu);
+int vhost_set_state(NetClientState *nc, int state);


This function is not used in this patch.


#endif
diff --git a/net/Makefile.objs b/net/Makefile.objs
index c5d076d19c..da459cfc19 100644
--- a/net/Makefile.objs
+++ b/net/Makefile.objs
@@ -26,7 +26,7 @@ tap-obj-$(CONFIG_SOLARIS) = tap-solaris.o
  tap-obj-y ?= tap-stub.o
  common-obj-$(CONFIG_POSIX) += tap.o $(tap-obj-y)
  common-obj-$(CONFIG_WIN32) += tap-win32.o
-
+common-obj-$(CONFIG_VHOST_KERNEL) += vhost-vdpa.o
  vde.o-libs = $(VDE_LIBS)
common-obj-$(CONFIG_CAN_BUS) += can/
diff --git a/net/clients.h b/net/clients.h
index a6ef267e19..92f9b59aed 100644
--- a/net/clients.h
+++ b/net/clients.h
@@ -61,4 +61,6 @@ int net_init_netmap(const Netdev *netdev, const char *name,
  int net_init_vhost_user(const Netdev *netdev, const char *name,
                          NetClientState *peer, Error **errp);
+int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
+                        NetClientState *peer, Error **errp);
  #endif /* QEMU_NET_CLIENTS_H */
diff --git a/net/net.c b/net/net.c
index ac5080dda1..2beb95388a 100644
--- a/net/net.c
+++ b/net/net.c
@@ -964,6 +964,7 @@ static int (* const 
net_client_init_fun[NET_CLIENT_DRIVER__MAX])(
          [NET_CLIENT_DRIVER_HUBPORT]   = net_init_hubport,
  #ifdef CONFIG_VHOST_NET_USER
          [NET_CLIENT_DRIVER_VHOST_USER] = net_init_vhost_user,
+        [NET_CLIENT_DRIVER_VHOST_VDPA] = net_init_vhost_vdpa,
  #endif
  #ifdef CONFIG_L2TPV3
          [NET_CLIENT_DRIVER_L2TPV3]    = net_init_l2tpv3,
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
new file mode 100644
index 0000000000..5daeba0b76
--- /dev/null
+++ b/net/vhost-vdpa.c
@@ -0,0 +1,211 @@
+/*
+ * vhost-vdpa.c
+ *
+ * Copyright(c) 2017-2018 Intel Corporation. All rights reserved.
+ * Copyright(c) 2020 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "clients.h"
+#include "net/vhost_net.h"
+#include "net/vhost-vdpa.h"
+#include "hw/virtio/vhost-vdpa.h"
+#include "chardev/char-fe.h"


I don't think we use char-fe (the chardev front-end) in this file.


+#include "qemu/config-file.h"
+#include "qemu/error-report.h"
+#include "qemu/option.h"
+#include "qapi/error.h"
+#include "trace.h"


I think we don't use tracing in this file.


+#include <linux/vfio.h>
+#include <sys/ioctl.h>
+#include <err.h>
+#include <linux/virtio_net.h>
+
+
+typedef struct VhostVDPAState {
+    NetClientState nc;


This may not work for the multiqueue case; you can either fix this or just leave a TODO comment.


+    struct vhost_vdpa vhost_vdpa;


This explains why patch 3 should come first.


+    VHostNetState *vhost_net;
+    uint64_t acked_features;
+    bool started;
+} VhostVDPAState;
+
+VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+    return s->vhost_net;
+}
+
+uint64_t vhost_vdpa_get_acked_features(NetClientState *nc)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+    return s->acked_features;
+}
+
+static void vhost_vdpa_stop(NetClientState *ncs)
+{
+    VhostVDPAState *s;
+
+    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+    s = DO_UPCAST(VhostVDPAState, nc, ncs);
+
+    if (s->vhost_net) {
+        /* save acked features */
+        uint64_t features = vhost_net_get_acked_features(s->vhost_net);
+        if (features) {
+            s->acked_features = features;
+         }
+        vhost_net_cleanup(s->vhost_net);
+    }
+}
+
+static int vhost_vdpa_start(NetClientState *ncs, void *be)
+{


The name of this function is confusing, we don't start vhost_vdpa actually.


+    VhostNetOptions options;
+    struct vhost_net *net = NULL;
+    VhostVDPAState *s;
+
+    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
+
+    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+    s = DO_UPCAST(VhostVDPAState, nc, ncs);
+
+    options.net_backend = ncs;
+    options.opaque      = be;
+    options.busyloop_timeout = 0;
+    net = vhost_net_init(&options);
+    if (!net) {
+        error_report("failed to init vhost_net for queue");
+        goto err;
+     }
+
+     if (s->vhost_net) {
+        vhost_net_cleanup(s->vhost_net);
+        g_free(s->vhost_net);
+     }
+     s->vhost_net = net;
+
+    return 0;
+
+err:
+    if (net) {
+        vhost_net_cleanup(net);
+    }
+    vhost_vdpa_stop(ncs);
+    return -1;
+}
+static void vhost_vdpa_cleanup(NetClientState *nc)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+
+    if (s->vhost_net) {
+        vhost_net_cleanup(s->vhost_net);
+        g_free(s->vhost_net);
+        s->vhost_net = NULL;
+    }
+
+    qemu_purge_queued_packets(nc);
+}
+
+static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
+{
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+    return true;
+}
+
+static bool vhost_vdpa_has_ufo(NetClientState *nc)
+{
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    uint64_t  features = 0;
+
+    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
+    features = vhost_net_get_features(s->vhost_net, features);
+    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
+
+}
+
+static NetClientInfo net_vhost_vdpa_info = {
+        .type = NET_CLIENT_DRIVER_VHOST_VDPA,
+        .size = sizeof(VhostVDPAState),
+        .cleanup = vhost_vdpa_cleanup,
+        .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
+        .has_ufo = vhost_vdpa_has_ufo,
+};
+
+static int net_vhost_vdpa_init(NetClientState *peer, const char *device,
+                               const char *name, const char *vhostdev)
+{
+    NetClientState *nc = NULL;
+    VhostVDPAState *s;
+    int vdpa_device_fd;
+    assert(name);
+
+    nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device, name);
+    snprintf(nc->info_str, sizeof(nc->info_str), "vhost-vdpa");
+    nc->queue_index = 0;
+
+    s = DO_UPCAST(VhostVDPAState, nc, nc);
+
+    vdpa_device_fd = open(vhostdev, O_RDWR);
+    if (vdpa_device_fd == -1) {
+        return -errno;
+    }
+    s->vhost_vdpa.device_fd = vdpa_device_fd;


Need to add a step to verify the device type and fail if it was not a networking device.


+    vhost_vdpa_start(nc, (void *)&s->vhost_vdpa);
+
+    assert(s->vhost_net);
+
+    return 0;
+}
+
+static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp)
+{
+    const char *name = opaque;
+    const char *driver, *netdev;
+
+    driver = qemu_opt_get(opts, "driver");
+    netdev = qemu_opt_get(opts, "netdev");
+
+    if (!driver || !netdev) {
+        return 0;
+    }
+
+    if (strcmp(netdev, name) == 0 &&
+        !g_str_has_prefix(driver, "virtio-net-")) {
+        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
+        return -1;
+    }
+
+    return 0;
+}
+
+int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
+                        NetClientState *peer, Error **errp)
+{
+    const NetdevVhostVDPAOptions *vhost_vdpa_opts;
+
+    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+    vhost_vdpa_opts = &netdev->u.vhost_vdpa;
+
+    /* verify net frontend */
+    if (qemu_opts_foreach(qemu_find_opts("device"), net_vhost_check_net,
+                          (char *)name, errp)) {
+        return -1;
+    }
+
+
+    return net_vhost_vdpa_init(peer, "vhost_vdpa", name,
+                               vhost_vdpa_opts->vhostdev);


Please add the support for passing fd via command line.


+
+    return 0;
+}
diff --git a/qapi/net.json b/qapi/net.json
index 335295be50..35a5ccee39 100644
--- a/qapi/net.json
+++ b/qapi/net.json
@@ -441,6 +441,22 @@
      '*queues':        'int' } }
##
+# @NetdevVhostVDPAOptions:
+#
+# Vhost-vdpa network backend
+#
+# @vhostdev: name of a mdev dev path in sysfs
+#
+# @queues: number of queues to be created for multiqueue vhost-vdpa
+#          (default: 1) (Since 2.11)
+#
+# Since: 2.11


The version number is wrong, I guess it's probably 5.1.

Thanks


+##
+{ 'struct': 'NetdevVhostVDPAOptions',
+  'data': {
+    '*vhostdev':     'str',
+    '*queues':       'int' } }
+##
  # @NetClientDriver:
  #
  # Available netdev drivers.
@@ -451,7 +467,7 @@
  ##
  { 'enum': 'NetClientDriver',
    'data': [ 'none', 'nic', 'user', 'tap', 'l2tpv3', 'socket', 'vde',
-            'bridge', 'hubport', 'netmap', 'vhost-user' ] }
+            'bridge', 'hubport', 'netmap', 'vhost-user', 'vhost-vdpa' ] }
##
  # @Netdev:
@@ -479,7 +495,8 @@
      'bridge':   'NetdevBridgeOptions',
      'hubport':  'NetdevHubPortOptions',
      'netmap':   'NetdevNetmapOptions',
-    'vhost-user': 'NetdevVhostUserOptions' } }
+    'vhost-user': 'NetdevVhostUserOptions',
+    'vhost-vdpa': 'NetdevVhostVDPAOptions' } }
##
  # @NetLegacy:




reply via email to

[Prev in Thread] Current Thread [Next in Thread]