From: Eugenio Perez Martin
Subject: Re: [RFC PATCH v7 06/25] vdpa: Send all updates in memory listener commit
Date: Fri, 22 Apr 2022 11:17:14 +0200

On Thu, Apr 14, 2022 at 6:12 AM Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2022/4/14 00:31, Eugenio Pérez wrote:
> > With the introduction of multiple ASIDs, many changes on different
> > listeners can arrive before the commit call.
>
>
> I think we have at most one listener even for the case of MQ/CVQ?
>

In this series we will have one listener per ASID in use. Doing it
differently would require either not sending all of the guest address
space to CVQ (which I find acceptable, although it may be slower and a
little trickier) or making the last vhost_vdpa device model aware of
all the other ASIDs.
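For illustration, here is a rough sketch of what "one listener per ASID"
means in code. The instance names and the helper are made up for this
reply; only MemoryListener, vhost_vdpa_memory_listener,
memory_listener_register() and address_space_memory are existing QEMU
symbols, and the sketch is not part of the series:

/*
 * Rough sketch only: two vhost_vdpa instances, each owning the memory
 * listener for its ASID.  data_vdpa, cvq_vdpa and this helper are
 * illustrative names.
 */
static struct vhost_vdpa data_vdpa;   /* data vqs, guest memory mappings   */
static struct vhost_vdpa cvq_vdpa;    /* shadow CVQ mappings, its own ASID */

static void example_register_listeners(void)
{
    data_vdpa.listener = vhost_vdpa_memory_listener;
    cvq_vdpa.listener = vhost_vdpa_memory_listener;

    /*
     * Both listeners observe the same memory transactions, so several
     * region_add/region_del updates for different ASIDs can pile up
     * before either listener's commit() runs -- the situation described
     * in the commit message.
     */
    memory_listener_register(&data_vdpa.listener, &address_space_memory);
    memory_listener_register(&cvq_vdpa.listener, &address_space_memory);
}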

>
> >   Since kernel vhost-vdpa
> > still does not support it, send it all in one shot.
> >
> > This also has one extra advantage: if there is no update to notify, we
> > save the iotlb_{begin,end} calls.
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> >   include/hw/virtio/vhost-vdpa.h |  2 +-
> >   hw/virtio/vhost-vdpa.c         | 69 +++++++++++++++++-----------------
> >   2 files changed, 36 insertions(+), 35 deletions(-)
> >
> > diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> > index a29dbb3f53..4961acea8b 100644
> > --- a/include/hw/virtio/vhost-vdpa.h
> > +++ b/include/hw/virtio/vhost-vdpa.h
> > @@ -27,7 +27,7 @@ typedef struct vhost_vdpa {
> >       int device_fd;
> >       int index;
> >       uint32_t msg_type;
> > -    bool iotlb_batch_begin_sent;
> > +    GArray *iotlb_updates;
> >       MemoryListener listener;
> >       struct vhost_vdpa_iova_range iova_range;
> >       uint64_t acked_features;
> > diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> > index 1f229ff4cb..27ee678dc9 100644
> > --- a/hw/virtio/vhost-vdpa.c
> > +++ b/hw/virtio/vhost-vdpa.c
> > @@ -85,6 +85,11 @@ static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
> >       msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
> >       msg.iotlb.type = VHOST_IOTLB_UPDATE;
> >
> > +    if (v->dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_IOTLB_BATCH)) {
> > +        g_array_append_val(v->iotlb_updates, msg);
> > +        return 0;
> > +    }
>
>
> I think it's better to use a consistent way for !batch and batch (e.g.
> we can do this even for a backend that doesn't support batching?)
>

Yes, without batching it simply writes the message to the device right
here instead of queuing it for the commit callback.
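To make the two paths concrete, here is a minimal standalone sketch
(plain glib/POSIX; msg_v2, has_batch_cap, dev_fd and the type values are
stand-ins for struct vhost_msg_v2, VHOST_BACKEND_F_IOTLB_BATCH and
v->device_fd, none of it is QEMU code): with the batch capability the
update is queued and only written at commit, without it the same message
is written immediately.

/* Standalone sketch of the queue-vs-immediate paths discussed above. */
#include <fcntl.h>
#include <glib.h>
#include <stdbool.h>
#include <unistd.h>

typedef struct { int type; unsigned long iova, size; } msg_v2;

static GArray *updates;      /* queued messages, like v->iotlb_updates */
static bool has_batch_cap;   /* backend advertises IOTLB batching      */
static int dev_fd = -1;      /* device fd stand-in                     */

static int send_update(const msg_v2 *m)
{
    if (has_batch_cap) {
        /* Batching: queue the message; it is flushed at commit time. */
        g_array_append_val(updates, *m);
        return 0;
    }
    /* No batching: write the message to the device right here. */
    return write(dev_fd, m, sizeof(*m)) == sizeof(*m) ? 0 : -1;
}

static void commit(void)
{
    msg_v2 batch = { .type = 1 };   /* stand-in for BATCH_BEGIN */

    if (updates->len == 0) {
        /* Nothing queued, e.g. a backend without the batch capability. */
        return;
    }
    (void)write(dev_fd, &batch, sizeof(batch));          /* BATCH_BEGIN   */
    for (guint i = 0; i < updates->len; i++) {
        msg_v2 *u = &g_array_index(updates, msg_v2, i);
        (void)write(dev_fd, u, sizeof(*u));              /* queued update */
    }
    batch.type = 2;                 /* stand-in for BATCH_END */
    (void)write(dev_fd, &batch, sizeof(batch));          /* BATCH_END     */
    g_array_set_size(updates, 0);
}

int main(void)
{
    updates = g_array_new(FALSE, FALSE, sizeof(msg_v2));
    dev_fd = open("/dev/null", O_WRONLY);   /* stand-in for the device fd */
    has_batch_cap = true;

    msg_v2 m = { .type = 0, .iova = 0x1000, .size = 0x1000 };
    (void)send_update(&m);   /* queued because has_batch_cap is set */
    commit();                /* flushed here: BEGIN, updates, END   */

    g_array_free(updates, TRUE);
    return 0;
}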

> Otherwise the codes are hard to be maintained.
>

I think it's hard to go back to the previous model as long as we need
two listeners. I can try to remove the need for the ASID 1 listener,
but if that's not possible, the only remaining option is to always
delay the maps to the memory listener commit callback.

>
> > +
> >      trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
> >                               msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);
> >
> > @@ -109,6 +114,11 @@ static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
> >       msg.iotlb.size = size;
> >       msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
> >
> > +    if (v->dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_IOTLB_BATCH)) {
> > +        g_array_append_val(v->iotlb_updates, msg);
> > +        return 0;
> > +    }
> > +
> >       trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
> >                                  msg.iotlb.size, msg.iotlb.type);
> >
> > @@ -121,56 +131,47 @@ static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
> >       return ret;
> >   }
> >
> > -static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
> > -{
> > -    int fd = v->device_fd;
> > -    struct vhost_msg_v2 msg = {
> > -        .type = v->msg_type,
> > -        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
> > -    };
> > -
> > -    trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type);
> > -    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
> > -        error_report("failed to write, fd=%d, errno=%d (%s)",
> > -                     fd, errno, strerror(errno));
> > -    }
> > -}
> > -
> > -static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
> > -{
> > -    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
> > -        !v->iotlb_batch_begin_sent) {
> > -        vhost_vdpa_listener_begin_batch(v);
> > -    }
> > -
> > -    v->iotlb_batch_begin_sent = true;
> > -}
> > -
> >   static void vhost_vdpa_listener_commit(MemoryListener *listener)
> >   {
> >       struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
> > -    struct vhost_dev *dev = v->dev;
> >       struct vhost_msg_v2 msg = {};
> >       int fd = v->device_fd;
> > +    size_t num = v->iotlb_updates->len;
> >
> > -    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
> > +    if (!num) {
> >           return;
> >       }
> >
> > -    if (!v->iotlb_batch_begin_sent) {
> > -        return;
> > +    msg.type = v->msg_type;
> > +    msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;
> > +    trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type);
> > +    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
>
>
> We need to check whether the vhost-vDPA backend supports batching first?
>

If it's not supported, nothing is ever queued in dma_map/dma_unmap, so
num == 0 and we return early.

>
> > +        error_report("failed to write BEGIN_BATCH, fd=%d, errno=%d (%s)",
> > +                     fd, errno, strerror(errno));
> > +        goto done;
> >       }
> >
> > -    msg.type = v->msg_type;
> > -    msg.iotlb.type = VHOST_IOTLB_BATCH_END;
> > +    for (size_t i = 0; i < num; ++i) {
> > +        struct vhost_msg_v2 *update = &g_array_index(v->iotlb_updates,
> > +                                                     struct vhost_msg_v2, i);
> > +        if (write(fd, update, sizeof(*update)) != sizeof(*update)) {
> > +            error_report("failed to write dma update, fd=%d, errno=%d (%s)",
> > +                         fd, errno, strerror(errno));
> > +            goto done;
>
>
> Maybe it's time to introduce v3 to allow a batch of messages to be
> passed to vhost-vDPA in a single system call.
>

It would be nice, but then we would not be solving the problem for pre-v3 kernels.
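For the record, a purely hypothetical illustration of the
single-system-call idea (no such v3 interface exists, so this cannot
work against current kernels, which expect exactly one message per
write(); it only shows the shape of a batched submission and assumes
the QEMU glib and Linux vhost headers are available):

/* Hypothetical: hand the whole queue of struct vhost_msg_v2 updates to
 * the kernel in a single writev().  Illustration only. */
#include <sys/uio.h>
#include <glib.h>
#include <linux/vhost.h>        /* struct vhost_msg_v2 */

static int flush_updates_in_one_syscall(int device_fd, GArray *updates)
{
    g_autofree struct iovec *iov = g_new(struct iovec, updates->len);
    size_t total = updates->len * sizeof(struct vhost_msg_v2);

    for (guint i = 0; i < updates->len; i++) {
        iov[i].iov_base = &g_array_index(updates, struct vhost_msg_v2, i);
        iov[i].iov_len = sizeof(struct vhost_msg_v2);
    }
    /* A v3-style backend would have to parse all messages from one call. */
    return writev(device_fd, iov, updates->len) == (ssize_t)total ? 0 : -1;
}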

Thanks!

> Thanks
>
>
> > +        }
> > +    }
> >
> > +    msg.iotlb.type = VHOST_IOTLB_BATCH_END;
> >       trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type);
> >       if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
> >           error_report("failed to write, fd=%d, errno=%d (%s)",
> >                        fd, errno, strerror(errno));
> >       }
> >
> > -    v->iotlb_batch_begin_sent = false;
> > +done:
> > +    g_array_set_size(v->iotlb_updates, 0);
> > +    return;
> > +
> >   }
> >
> >   static void vhost_vdpa_listener_region_add(MemoryListener *listener,
> > @@ -227,7 +228,6 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
> >           iova = mem_region.iova;
> >       }
> >
> > -    vhost_vdpa_iotlb_batch_begin_once(v);
> >       ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
> >                                vaddr, section->readonly);
> >       if (ret) {
> > @@ -292,7 +292,6 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
> >           iova = result->iova;
> >           vhost_iova_tree_remove(v->iova_tree, &mem_region);
> >       }
> > -    vhost_vdpa_iotlb_batch_begin_once(v);
> >       ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
> >       if (ret) {
> >           error_report("vhost_vdpa dma unmap error!");
> > @@ -446,6 +445,7 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
> >       dev->opaque =  opaque ;
> >       v->listener = vhost_vdpa_memory_listener;
> >       v->msg_type = VHOST_IOTLB_MSG_V2;
> > +    v->iotlb_updates = g_array_new(false, false, sizeof(struct vhost_msg_v2));
> >       ret = vhost_vdpa_init_svq(dev, v, errp);
> >       if (ret) {
> >           goto err;
> > @@ -579,6 +579,7 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
> >       trace_vhost_vdpa_cleanup(dev, v);
> >       vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
> >       memory_listener_unregister(&v->listener);
> > +    g_array_free(v->iotlb_updates, true);
> >       vhost_vdpa_svq_cleanup(dev);
> >
> >       dev->opaque = NULL;
>



