Re: [PATCH 01/31] vdpa: Reorder virtio/vhost-vdpa.c functions


From: Eugenio Perez Martin
Subject: Re: [PATCH 01/31] vdpa: Reorder virtio/vhost-vdpa.c functions
Date: Mon, 21 Feb 2022 08:42:22 +0100

On Mon, Feb 21, 2022 at 8:31 AM Jason Wang <jasowang@redhat.com> wrote:
>
>
> > On 2022/1/28 3:57 PM, Eugenio Perez Martin wrote:
> > On Fri, Jan 28, 2022 at 6:59 AM Jason Wang <jasowang@redhat.com> wrote:
> >>
> >> On 2022/1/22 4:27 AM, Eugenio Pérez wrote:
> >>> vhost_vdpa_set_features and vhost_vdpa_init need to use
> >>> vhost_vdpa_get_features in svq mode.
> >>>
> >>> vhost_vdpa_dev_start needs to use almost all _set_ functions:
> >>> vhost_vdpa_set_vring_dev_kick, vhost_vdpa_set_vring_dev_call,
> >>> vhost_vdpa_set_dev_vring_base and vhost_vdpa_set_dev_vring_num.
> >>>
> >>> No functional change intended.
> >>
> >> Is it related to (i.e., a must for) the SVQ code?
> >>
> > Yes, SVQ needs to call the device variants of these functions to
> > configure the device, while exposing the SVQ ones.
> >
> > For example, for set_features, SVQ needs to set the device features in
> > the start code, but expose the SVQ ones to the guest.
> >
> > Another possibility is to forward-declare them, but I feel that
> > pollutes the code more, doesn't it? Is there any reason to avoid the
> > reordering beyond reducing the number of changes/patches?
>
>
> No, but for reviewers it might be easier if you squash the reordering
> logic into the patch which needs it.
>

Sure, I can do it that way. I thought the opposite would be clearer, but
I will merge the reordering into the individual patches for the next
version.

Thanks!
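
For readers following the thread: below is a minimal, self-contained C
sketch (not from the patch; all names are hypothetical stand-ins) of the
trade-off discussed above. A forward declaration lets a caller be defined
before the callee's body, avoiding the reorder at the cost of an extra
declaration to maintain; the patch moves the definitions instead.

    #include <stdio.h>

    /* Forward declaration: the caller below can be defined before the
     * callee's body. Without this line (or without moving the definition
     * up, as the patch does), the call would not compile, since C requires
     * a function to be declared before it is used. */
    static void set_vring_dev_kick(void);

    /* Stand-in for vhost_vdpa_dev_start, which needs the _dev_ helpers. */
    static void dev_start(void)
    {
        set_vring_dev_kick();
    }

    static void set_vring_dev_kick(void)
    {
        puts("kick");
    }

    int main(void)
    {
        dev_start();
        return 0;
    }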

> Thanks
>
>
> >
> > Thanks!
> >
> >
> >> Thanks
> >>
> >>
> >>> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> >>> ---
> >>>    hw/virtio/vhost-vdpa.c | 164 ++++++++++++++++++++---------------------
> >>>    1 file changed, 82 insertions(+), 82 deletions(-)
> >>>
> >>> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> >>> index 04ea43704f..6c10a7f05f 100644
> >>> --- a/hw/virtio/vhost-vdpa.c
> >>> +++ b/hw/virtio/vhost-vdpa.c
> >>> @@ -342,41 +342,6 @@ static bool vhost_vdpa_one_time_request(struct vhost_dev *dev)
> >>>        return v->index != 0;
> >>>    }
> >>>
> >>> -static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
> >>> -{
> >>> -    struct vhost_vdpa *v;
> >>> -    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
> >>> -    trace_vhost_vdpa_init(dev, opaque);
> >>> -    int ret;
> >>> -
> >>> -    /*
> >>> -     * Similar to VFIO, we end up pinning all guest memory and have to
> >>> -     * disable discarding of RAM.
> >>> -     */
> >>> -    ret = ram_block_discard_disable(true);
> >>> -    if (ret) {
> >>> -        error_report("Cannot set discarding of RAM broken");
> >>> -        return ret;
> >>> -    }
> >>> -
> >>> -    v = opaque;
> >>> -    v->dev = dev;
> >>> -    dev->opaque =  opaque ;
> >>> -    v->listener = vhost_vdpa_memory_listener;
> >>> -    v->msg_type = VHOST_IOTLB_MSG_V2;
> >>> -
> >>> -    vhost_vdpa_get_iova_range(v);
> >>> -
> >>> -    if (vhost_vdpa_one_time_request(dev)) {
> >>> -        return 0;
> >>> -    }
> >>> -
> >>> -    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
> >>> -                               VIRTIO_CONFIG_S_DRIVER);
> >>> -
> >>> -    return 0;
> >>> -}
> >>> -
> >>>    static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
> >>>                                                int queue_index)
> >>>    {
> >>> @@ -506,24 +471,6 @@ static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
> >>>        return 0;
> >>>    }
> >>>
> >>> -static int vhost_vdpa_set_features(struct vhost_dev *dev,
> >>> -                                   uint64_t features)
> >>> -{
> >>> -    int ret;
> >>> -
> >>> -    if (vhost_vdpa_one_time_request(dev)) {
> >>> -        return 0;
> >>> -    }
> >>> -
> >>> -    trace_vhost_vdpa_set_features(dev, features);
> >>> -    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
> >>> -    if (ret) {
> >>> -        return ret;
> >>> -    }
> >>> -
> >>> -    return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
> >>> -}
> >>> -
> >>>    static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
> >>>    {
> >>>        uint64_t features;
> >>> @@ -646,35 +593,6 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
> >>>        return ret;
> >>>     }
> >>>
> >>> -static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
> >>> -{
> >>> -    struct vhost_vdpa *v = dev->opaque;
> >>> -    trace_vhost_vdpa_dev_start(dev, started);
> >>> -
> >>> -    if (started) {
> >>> -        vhost_vdpa_host_notifiers_init(dev);
> >>> -        vhost_vdpa_set_vring_ready(dev);
> >>> -    } else {
> >>> -        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
> >>> -    }
> >>> -
> >>> -    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
> >>> -        return 0;
> >>> -    }
> >>> -
> >>> -    if (started) {
> >>> -        memory_listener_register(&v->listener, &address_space_memory);
> >>> -        return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
> >>> -    } else {
> >>> -        vhost_vdpa_reset_device(dev);
> >>> -        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
> >>> -                                   VIRTIO_CONFIG_S_DRIVER);
> >>> -        memory_listener_unregister(&v->listener);
> >>> -
> >>> -        return 0;
> >>> -    }
> >>> -}
> >>> -
> >>>    static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
> >>>                                         struct vhost_log *log)
> >>>    {
> >>> @@ -735,6 +653,35 @@ static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
> >>>        return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
> >>>    }
> >>>
> >>> +static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
> >>> +{
> >>> +    struct vhost_vdpa *v = dev->opaque;
> >>> +    trace_vhost_vdpa_dev_start(dev, started);
> >>> +
> >>> +    if (started) {
> >>> +        vhost_vdpa_host_notifiers_init(dev);
> >>> +        vhost_vdpa_set_vring_ready(dev);
> >>> +    } else {
> >>> +        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
> >>> +    }
> >>> +
> >>> +    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
> >>> +        return 0;
> >>> +    }
> >>> +
> >>> +    if (started) {
> >>> +        memory_listener_register(&v->listener, &address_space_memory);
> >>> +        return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
> >>> +    } else {
> >>> +        vhost_vdpa_reset_device(dev);
> >>> +        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
> >>> +                                   VIRTIO_CONFIG_S_DRIVER);
> >>> +        memory_listener_unregister(&v->listener);
> >>> +
> >>> +        return 0;
> >>> +    }
> >>> +}
> >>> +
> >>>    static int vhost_vdpa_get_features(struct vhost_dev *dev,
> >>>                                         uint64_t *features)
> >>>    {
> >>> @@ -745,6 +692,24 @@ static int vhost_vdpa_get_features(struct vhost_dev *dev,
> >>>        return ret;
> >>>    }
> >>>
> >>> +static int vhost_vdpa_set_features(struct vhost_dev *dev,
> >>> +                                   uint64_t features)
> >>> +{
> >>> +    int ret;
> >>> +
> >>> +    if (vhost_vdpa_one_time_request(dev)) {
> >>> +        return 0;
> >>> +    }
> >>> +
> >>> +    trace_vhost_vdpa_set_features(dev, features);
> >>> +    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
> >>> +    if (ret) {
> >>> +        return ret;
> >>> +    }
> >>> +
> >>> +    return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
> >>> +}
> >>> +
> >>>    static int vhost_vdpa_set_owner(struct vhost_dev *dev)
> >>>    {
> >>>        if (vhost_vdpa_one_time_request(dev)) {
> >>> @@ -772,6 +737,41 @@ static bool  vhost_vdpa_force_iommu(struct vhost_dev *dev)
> >>>        return true;
> >>>    }
> >>>
> >>> +static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
> >>> +{
> >>> +    struct vhost_vdpa *v;
> >>> +    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
> >>> +    trace_vhost_vdpa_init(dev, opaque);
> >>> +    int ret;
> >>> +
> >>> +    /*
> >>> +     * Similar to VFIO, we end up pinning all guest memory and have to
> >>> +     * disable discarding of RAM.
> >>> +     */
> >>> +    ret = ram_block_discard_disable(true);
> >>> +    if (ret) {
> >>> +        error_report("Cannot set discarding of RAM broken");
> >>> +        return ret;
> >>> +    }
> >>> +
> >>> +    v = opaque;
> >>> +    v->dev = dev;
> >>> +    dev->opaque =  opaque ;
> >>> +    v->listener = vhost_vdpa_memory_listener;
> >>> +    v->msg_type = VHOST_IOTLB_MSG_V2;
> >>> +
> >>> +    vhost_vdpa_get_iova_range(v);
> >>> +
> >>> +    if (vhost_vdpa_one_time_request(dev)) {
> >>> +        return 0;
> >>> +    }
> >>> +
> >>> +    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
> >>> +                               VIRTIO_CONFIG_S_DRIVER);
> >>> +
> >>> +    return 0;
> >>> +}
> >>> +
> >>>    const VhostOps vdpa_ops = {
> >>>            .backend_type = VHOST_BACKEND_TYPE_VDPA,
> >>>            .vhost_backend_init = vhost_vdpa_init,
>



