Re: [PATCH v5 6/6] vdpa: Always start CVQ in SVQ mode
From: Michael S. Tsirkin
Subject: Re: [PATCH v5 6/6] vdpa: Always start CVQ in SVQ mode
Date: Mon, 31 Oct 2022 04:25:19 -0400
On Tue, Oct 11, 2022 at 12:41:54PM +0200, Eugenio Pérez wrote:
> Isolate the control virtqueue in its own group, allowing QEMU to intercept
> control commands while letting the dataplane run fully passthrough to the
> guest.
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
I guess we need SVQ for this. That is not a reason to allocate it for
all queues, though. Also, if vdpa does not support PASID then I guess
we should not bother with SVQ.
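
As a hypothetical sketch of that gating (not code from this series), the
SVQ setup for the CVQ could simply be skipped whenever the backend does not
advertise per-group address spaces; vhost_vdpa_can_isolate_cvq below is a
made-up name, and the patch reaches the same decision through
vhost_vdpa_get_as_num() plus the per-VQ group checks:

    /* Sketch, assuming <sys/ioctl.h> and <linux/vhost.h>: only bother
     * shadowing the CVQ when the backend can actually isolate it in
     * its own address space. */
    static bool vhost_vdpa_can_isolate_cvq(int device_fd)
    {
        uint64_t features = 0;

        if (ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &features) < 0) {
            return false; /* cannot query: keep the CVQ passthrough */
        }
        return !!(features & (1ULL << VHOST_BACKEND_F_IOTLB_ASID));
    }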
> ---
> v5:
> * Fix not adding CVQ buffers when x-svq=on is specified.
> * Move the vring state into vhost_vdpa_get_vring_group instead of using a
> parameter.
> * Rename VHOST_VDPA_NET_CVQ_PASSTHROUGH to VHOST_VDPA_NET_DATA_ASID
>
> v4:
> * Squash vhost_vdpa_cvq_group_is_independent.
> * Rebased on the latest CVQ start series, which allocates CVQ cmd bufs at load.
> * Do not check for the CVQ index in vhost_vdpa_net_prepare; we only have one
> callback registered in that NetClientInfo.
>
> v3:
> * Make ASID-related queries print a warning instead of returning an
> error and aborting QEMU startup.
> ---
> hw/virtio/vhost-vdpa.c | 3 +-
> net/vhost-vdpa.c | 118 +++++++++++++++++++++++++++++++++++++++--
> 2 files changed, 115 insertions(+), 6 deletions(-)
>
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 29d009c02b..fd4de06eab 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -682,7 +682,8 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
> {
> uint64_t features;
> uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
> - 0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
> + 0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
> + 0x1ULL << VHOST_BACKEND_F_IOTLB_ASID;
> int r;
>
> if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index f7831aeb8d..6f6ef59ea3 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -38,6 +38,9 @@ typedef struct VhostVDPAState {
> void *cvq_cmd_out_buffer;
> virtio_net_ctrl_ack *status;
>
> + /* Number of address spaces supported by the device */
> + unsigned address_space_num;
> +
> /* The device always has SVQ enabled */
> bool always_svq;
> bool started;
> @@ -102,6 +105,9 @@ static const uint64_t vdpa_svq_device_features =
> BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
> BIT_ULL(VIRTIO_NET_F_STANDBY);
>
> +#define VHOST_VDPA_NET_DATA_ASID 0
> +#define VHOST_VDPA_NET_CVQ_ASID 1
> +
> VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
> {
> VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> @@ -226,6 +232,34 @@ static NetClientInfo net_vhost_vdpa_info = {
> .check_peer_type = vhost_vdpa_check_peer_type,
> };
>
> +static uint32_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index)
> +{
> + struct vhost_vring_state state = {
> + .index = vq_index,
> + };
> + int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);
> +
> + return r < 0 ? 0 : state.num;
> +}
> +
> +static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
> + unsigned vq_group,
> + unsigned asid_num)
> +{
> + struct vhost_vring_state asid = {
> + .index = vq_group,
> + .num = asid_num,
> + };
> + int ret;
> +
> + ret = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
> + if (unlikely(ret < 0)) {
> + warn_report("Can't set vq group %u asid %u, errno=%d (%s)",
> + asid.index, asid.num, errno, g_strerror(errno));
> + }
> + return ret;
> +}
> +
> static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
> {
> VhostIOVATree *tree = v->iova_tree;
> @@ -300,11 +334,50 @@ dma_map_err:
> static int vhost_vdpa_net_cvq_start(NetClientState *nc)
> {
> VhostVDPAState *s;
> - int r;
> + struct vhost_vdpa *v;
> + uint32_t cvq_group;
> + int cvq_index, r;
>
> assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
>
> s = DO_UPCAST(VhostVDPAState, nc, nc);
> + v = &s->vhost_vdpa;
> +
> + v->listener_shadow_vq = s->always_svq;
> + v->shadow_vqs_enabled = s->always_svq;
> + s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_DATA_ASID;
> +
> + if (s->always_svq) {
> + goto out;
> + }
> +
> + if (s->address_space_num < 2) {
> + return 0;
> + }
> +
> + /**
> + * Check that every data virtqueue of the device is in a different VQ
> + * group than the last virtqueue (the CVQ), whose group is fetched
> + * into cvq_group below.
> + */
> + cvq_index = v->dev->vq_index_end - 1;
> + cvq_group = vhost_vdpa_get_vring_group(v->device_fd, cvq_index);
> + for (int i = 0; i < cvq_index; ++i) {
> + uint32_t group = vhost_vdpa_get_vring_group(v->device_fd, i);
> +
> + if (unlikely(group == cvq_group)) {
> + warn_report("CVQ %u group is the same as VQ %u one (%u)", cvq_group,
> + i, group);
> + return 0;
> + }
> + }
> +
> + r = vhost_vdpa_set_address_space_id(v, cvq_group,
> + VHOST_VDPA_NET_CVQ_ASID);
> + if (r == 0) {
> + v->shadow_vqs_enabled = true;
> + s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;
> + }
> +
> +out:
> if (!s->vhost_vdpa.shadow_vqs_enabled) {
> return 0;
> }
> @@ -576,12 +649,38 @@ static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
> .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
> };
>
> +static uint32_t vhost_vdpa_get_as_num(int vdpa_device_fd)
> +{
> + uint64_t features;
> + unsigned num_as;
> + int r;
> +
> + r = ioctl(vdpa_device_fd, VHOST_GET_BACKEND_FEATURES, &features);
> + if (unlikely(r < 0)) {
> + warn_report("Cannot get backend features");
> + return 1;
> + }
> +
> + if (!(features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
> + return 1;
> + }
> +
> + r = ioctl(vdpa_device_fd, VHOST_VDPA_GET_AS_NUM, &num_as);
> + if (unlikely(r < 0)) {
> + warn_report("Cannot retrieve number of supported ASs");
> + return 1;
> + }
> +
> + return num_as;
> +}
> +
> static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> const char *device,
> const char *name,
> int vdpa_device_fd,
> int queue_pair_index,
> int nvqs,
> + unsigned nas,
> bool is_datapath,
> bool svq,
> VhostIOVATree *iova_tree)
> @@ -600,6 +699,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> snprintf(nc->info_str, sizeof(nc->info_str), TYPE_VHOST_VDPA);
> s = DO_UPCAST(VhostVDPAState, nc, nc);
>
> + s->address_space_num = nas;
> s->vhost_vdpa.device_fd = vdpa_device_fd;
> s->vhost_vdpa.index = queue_pair_index;
> s->always_svq = svq;
> @@ -686,6 +786,8 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> g_autoptr(VhostIOVATree) iova_tree = NULL;
> NetClientState *nc;
> int queue_pairs, r, i = 0, has_cvq = 0;
> + unsigned num_as = 1;
> + bool svq_cvq;
>
> assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
> opts = &netdev->u.vhost_vdpa;
> @@ -711,7 +813,13 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> return queue_pairs;
> }
>
> - if (opts->x_svq) {
> + svq_cvq = opts->x_svq;
> + if (has_cvq && !opts->x_svq) {
> + num_as = vhost_vdpa_get_as_num(vdpa_device_fd);
> + svq_cvq = num_as > 1;
> + }
> +
> + if (opts->x_svq || svq_cvq) {
> struct vhost_vdpa_iova_range iova_range;
>
> uint64_t invalid_dev_features =
> @@ -734,15 +842,15 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
>
> for (i = 0; i < queue_pairs; i++) {
> ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
> - vdpa_device_fd, i, 2, true, opts->x_svq,
> - iova_tree);
> + vdpa_device_fd, i, 2, num_as, true,
> + opts->x_svq, iova_tree);
> if (!ncs[i])
> goto err;
> }
>
> if (has_cvq) {
> nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
> - vdpa_device_fd, i, 1, false,
> + vdpa_device_fd, i, 1, num_as, false,
> opts->x_svq, iova_tree);
> if (!nc)
> goto err;
> --
> 2.31.1
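
For readers following along, the ASID plumbing above boils down to three
ioctls on the vhost-vdpa device fd (a condensed sketch with error handling
elided; the identifiers are the ones used in the patch):

    unsigned num_as;
    struct vhost_vring_state group = { .index = cvq_index };
    struct vhost_vring_state asid;

    /* 1. How many address spaces does the device support? */
    ioctl(vdpa_device_fd, VHOST_VDPA_GET_AS_NUM, &num_as);

    /* 2. Which VQ group does the CVQ belong to? (answer lands in .num) */
    ioctl(vdpa_device_fd, VHOST_VDPA_GET_VRING_GROUP, &group);

    /* 3. Pin that group to its own address space, ASID 1. */
    asid.index = group.num;
    asid.num = VHOST_VDPA_NET_CVQ_ASID;
    ioctl(vdpa_device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);

If num_as < 2, or any data VQ turns out to share the CVQ's group, the patch
leaves everything in VHOST_VDPA_NET_DATA_ASID and the CVQ stays passthrough.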