
Re: [RFC PATCH v7 25/25] vdpa: Add x-cvq-svq


From: Eugenio Perez Martin
Subject: Re: [RFC PATCH v7 25/25] vdpa: Add x-cvq-svq
Date: Mon, 18 Apr 2022 16:16:35 +0200

On Thu, Apr 14, 2022 at 11:10 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Thu, Apr 14, 2022 at 12:33 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> >
> > This isolates shadow cvq in its own group.
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> >  qapi/net.json    |  8 +++-
> >  net/vhost-vdpa.c | 98 ++++++++++++++++++++++++++++++++++++++++++++++--
> >  2 files changed, 100 insertions(+), 6 deletions(-)
> >
> > diff --git a/qapi/net.json b/qapi/net.json
> > index 92848e4362..39c245e6cd 100644
> > --- a/qapi/net.json
> > +++ b/qapi/net.json
> > @@ -447,9 +447,12 @@
> >  #
> >  # @x-svq: Start device with (experimental) shadow virtqueue. (Since 7.1)
> >  #         (default: false)
> > +# @x-cvq-svq: Start device with (experimental) shadow virtqueue in its own
> > +#             virtqueue group. (Since 7.1)
> > +#             (default: false)
> >  #
> >  # Features:
> > -# @unstable: Member @x-svq is experimental.
> > +# @unstable: Members @x-svq and @x-cvq-svq are experimental.
> >  #
> >  # Since: 5.1
> >  ##
> > @@ -457,7 +460,8 @@
> >    'data': {
> >      '*vhostdev':     'str',
> >      '*queues':       'int',
> > -    '*x-svq':        {'type': 'bool', 'features' : [ 'unstable'] } } }
> > +    '*x-svq':        {'type': 'bool', 'features' : [ 'unstable'] },
> > +    '*x-cvq-svq':    {'type': 'bool', 'features' : [ 'unstable'] } } }
> >
> >  ##
> >  # @NetClientDriver:
> > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > index a6f803ea4e..851dacb902 100644
> > --- a/net/vhost-vdpa.c
> > +++ b/net/vhost-vdpa.c
> > @@ -377,6 +377,17 @@ static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
> >      return ret;
> >  }
> >
> > +static int vhost_vdpa_get_backend_features(int fd, uint64_t *features,
> > +                                           Error **errp)
> > +{
> > +    int ret = ioctl(fd, VHOST_GET_BACKEND_FEATURES, features);
> > +    if (ret) {
> > +        error_setg_errno(errp, errno,
> > +            "Fail to query backend features from vhost-vDPA device");
> > +    }
> > +    return ret;
> > +}
> > +
> >  static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
> >                                            int *has_cvq, Error **errp)
> >  {
> > @@ -410,16 +421,56 @@ static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
> >      return 1;
> >  }
> >
> > +/**
> > + * Check that the vdpa device supports assigning ASID 1 to the CVQ group
> > + *
> > + * @vdpa_device_fd: Vdpa device fd
> > + * @queue_pairs: Queue pairs
> > + * @errp: Error
> > + */
> > +static int vhost_vdpa_check_cvq_svq(int vdpa_device_fd, int queue_pairs,
> > +                                    Error **errp)
> > +{
> > +    uint64_t backend_features;
> > +    unsigned num_as;
> > +    int r;
> > +
> > +    r = vhost_vdpa_get_backend_features(vdpa_device_fd, &backend_features,
> > +                                        errp);
> > +    if (unlikely(r)) {
> > +        return -1;
> > +    }
> > +
> > +    if (unlikely(!(backend_features & VHOST_BACKEND_F_IOTLB_ASID))) {
> > +        error_setg(errp, "Device without IOTLB_ASID feature");
> > +        return -1;
> > +    }
> > +
> > +    r = ioctl(vdpa_device_fd, VHOST_VDPA_GET_AS_NUM, &num_as);
> > +    if (unlikely(r)) {
> > +        error_setg_errno(errp, errno,
> > +                         "Cannot retrieve number of supported ASs");
> > +        return -1;
> > +    }
> > +    if (unlikely(num_as < 2)) {
> > +        error_setg(errp, "Insufficient number of ASs (%u, min: 2)", num_as);
> > +        return -1;
> > +    }
> > +
>
> This is not sufficient, we still need to check whether CVQ doesn't
> share a group with other virtqueues.
>

That check is done at vhost-vdpa.c:vhost_dev_is_independent_group. It
cannot be done here because we don't know the cvq index at this point:
since the guest has not acked features yet, we don't know whether cvq
is at index 2 or is the last vq the device offers.
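
Just to illustrate the idea (a rough, untested sketch; the helper names
below are made up for this mail, they are not the ones in the patches):
once the guest acks features we can compute the cvq index per the
virtio-net spec and then check that no data vq shares a vq group with
it, querying each vq's group with VHOST_VDPA_GET_VRING_GROUP. That is
roughly what the independence check ends up verifying.

/*
 * Per the virtio-net spec the cvq index depends on whether the guest
 * acked VIRTIO_NET_F_MQ: with MQ the data vqs occupy [0, 2 * max_qps)
 * and cvq is the last vq; without MQ cvq sits at index 2.
 */
static unsigned vhost_vdpa_net_cvq_index(uint64_t acked_features,
                                         unsigned max_queue_pairs)
{
    if (acked_features & BIT_ULL(VIRTIO_NET_F_MQ)) {
        return 2 * max_queue_pairs;
    }
    return 2;
}

/* Return true if no data vq shares a virtqueue group with cvq. */
static bool vhost_vdpa_cvq_group_is_independent(int device_fd,
                                                unsigned cvq_index)
{
    struct vhost_vring_state state = { .index = cvq_index };
    unsigned cvq_group;

    if (ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state)) {
        return false;
    }
    cvq_group = state.num;

    for (unsigned i = 0; i < cvq_index; ++i) {
        state.index = i;
        if (ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state)) {
            return false;
        }
        if (state.num == cvq_group) {
            return false;
        }
    }

    return true;
}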

> Thanks
>
> > +    return 0;
> > +}
> > +
> >  int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> >                          NetClientState *peer, Error **errp)
> >  {
> >      const NetdevVhostVDPAOptions *opts;
> > +    struct vhost_vdpa_iova_range iova_range;
> >      uint64_t features;
> >      int vdpa_device_fd;
> >      g_autofree NetClientState **ncs = NULL;
> >      NetClientState *nc;
> >      int queue_pairs, r, i, has_cvq = 0;
> >      g_autoptr(VhostIOVATree) iova_tree = NULL;
> > +    ERRP_GUARD();
> >
> >      assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
> >      opts = &netdev->u.vhost_vdpa;
> > @@ -444,8 +495,9 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> >          qemu_close(vdpa_device_fd);
> >          return queue_pairs;
> >      }
> > -    if (opts->x_svq) {
> > -        struct vhost_vdpa_iova_range iova_range;
> > +    if (opts->x_cvq_svq || opts->x_svq) {
> > +        vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
> > +
> >          uint64_t invalid_dev_features =
> >              features & ~vdpa_svq_device_features &
> >              /* Transport are all accepted at this point */
> > @@ -457,7 +509,21 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> >                         invalid_dev_features);
> >              goto err_svq;
> >          }
> > -        vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
> > +    }
> > +
> > +    if (opts->x_cvq_svq) {
> > +        if (!has_cvq) {
> > +            error_setg(errp, "Cannot use x-cvq-svq with a device without cvq");
> > +            goto err_svq;
> > +        }
> > +
> > +        r = vhost_vdpa_check_cvq_svq(vdpa_device_fd, queue_pairs, errp);
> > +        if (unlikely(r)) {
> > +            error_prepend(errp, "Cannot configure CVQ SVQ: ");
> > +            goto err_svq;
> > +        }
> > +    }
> > +    if (opts->x_svq) {
> >          iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
> >      }
> >
> > @@ -472,11 +538,35 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> >      }
> >
> >      if (has_cvq) {
> > +        g_autoptr(VhostIOVATree) cvq_iova_tree = NULL;
> > +
> > +        if (opts->x_cvq_svq) {
> > +            cvq_iova_tree = vhost_iova_tree_new(iova_range.first,
> > +                                                iova_range.last);
> > +        } else if (opts->x_svq) {
> > +            cvq_iova_tree = vhost_iova_tree_acquire(iova_tree);
> > +        }
> > +
> >          nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
> >                                   vdpa_device_fd, i, 1,
> > -                                 false, opts->x_svq, iova_tree);
> > +                                 false, opts->x_cvq_svq || opts->x_svq,
> > +                                 cvq_iova_tree);
> >          if (!nc)
> >              goto err;
> > +
> > +        if (opts->x_cvq_svq) {
> > +            struct vhost_vring_state asid = {
> > +                .index = 1,
> > +                .num = 1,
> > +            };
> > +
> > +            r = ioctl(vdpa_device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
> > +            if (unlikely(r)) {
> > +                error_setg_errno(errp, errno,
> > +                                 "Cannot set cvq group independent asid");
> > +                goto err;
> > +            }
> > +        }
> >      }
> >
> >      return 0;
> > --
> > 2.27.0
> >
>



