qemu-devel

Re: [PATCH v3] vhost-vdpa: Do not send empty IOTLB update batches


From: Jason Wang
Subject: Re: [PATCH v3] vhost-vdpa: Do not send empty IOTLB update batches
Date: Mon, 16 Aug 2021 10:51:57 +0800

On Thu, Aug 12, 2021 at 10:09 PM Eugenio Pérez <eperezma@redhat.com> wrote:
>
> With the introduction of batch hinting, meaningless batches containing
> no IOTLB updates can be created if the memory region was skipped by
> vhost_vdpa_listener_skipped_section. This is the case for host notifier
> memory regions, device un/realize, and others. It causes the vdpa
> device to receive DMA mapping updates with no actual changes, a
> potentially expensive operation for nothing.
>
> To avoid that, the VHOST_IOTLB_BATCH_BEGIN hint is delayed until there
> is a meaningful (not skipped) mapping or unmapping operation, and
> VHOST_IOTLB_BATCH_END is not written unless at least one _UPDATE /
> _INVALIDATE message has been issued.

Acked-by: Jason Wang <jasowang@redhat.com>

>
> v3:
>   * Use a bool instead of a counter avoiding potential number wrapping
>   * Fix bad check on _commit
>   * Move VHOST_BACKEND_F_IOTLB_BATCH check to
>     vhost_vdpa_iotlb_batch_begin_once
>
> v2 (from RFC):
>   * Rename misleading name
>   * Abstract start batching function for listener_add/del
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> ---
>  include/hw/virtio/vhost-vdpa.h |  1 +
>  hw/virtio/vhost-vdpa.c         | 35 ++++++++++++++++++++++------------
>  2 files changed, 24 insertions(+), 12 deletions(-)
>
> diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> index e98e327f12..6b9288fef8 100644
> --- a/include/hw/virtio/vhost-vdpa.h
> +++ b/include/hw/virtio/vhost-vdpa.h
> @@ -23,6 +23,7 @@ typedef struct vhost_vdpa {
>      int device_fd;
>      int index;
>      uint32_t msg_type;
> +    bool iotlb_batch_begin_sent;
>      MemoryListener listener;
>      struct vhost_dev *dev;
>      VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 6ce94a1f4d..93b7db61d1 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -89,19 +89,13 @@ static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
>      return ret;
>  }
>
> -static void vhost_vdpa_listener_begin(MemoryListener *listener)
> +static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
>  {
> -    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
> -    struct vhost_dev *dev = v->dev;
> -    struct vhost_msg_v2 msg = {};
>      int fd = v->device_fd;
> -
> -    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
> -        return;
> -    }
> -
> -    msg.type = v->msg_type;
> -    msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;
> +    struct vhost_msg_v2 msg = {
> +        .type = v->msg_type,
> +        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
> +    };
>
>      if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
>          error_report("failed to write, fd=%d, errno=%d (%s)",
> @@ -109,6 +103,16 @@ static void vhost_vdpa_listener_begin(MemoryListener *listener)
>      }
>  }
>
> +static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
> +{
> +    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
> +        !v->iotlb_batch_begin_sent) {
> +        vhost_vdpa_listener_begin_batch(v);
> +    }
> +
> +    v->iotlb_batch_begin_sent = true;
> +}
> +
>  static void vhost_vdpa_listener_commit(MemoryListener *listener)
>  {
>      struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
> @@ -120,6 +124,10 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
>          return;
>      }
>
> +    if (!v->iotlb_batch_begin_sent) {
> +        return;
> +    }
> +
>      msg.type = v->msg_type;
>      msg.iotlb.type = VHOST_IOTLB_BATCH_END;
>
> @@ -127,6 +135,8 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
>          error_report("failed to write, fd=%d, errno=%d (%s)",
>                       fd, errno, strerror(errno));
>      }
> +
> +    v->iotlb_batch_begin_sent = false;
>  }
>
>  static void vhost_vdpa_listener_region_add(MemoryListener *listener,
> @@ -170,6 +180,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
>
>      llsize = int128_sub(llend, int128_make64(iova));
>
> +    vhost_vdpa_iotlb_batch_begin_once(v);
>      ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
>                               vaddr, section->readonly);
>      if (ret) {
> @@ -221,6 +232,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
>
>      llsize = int128_sub(llend, int128_make64(iova));
>
> +    vhost_vdpa_iotlb_batch_begin_once(v);
>      ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
>      if (ret) {
>          error_report("vhost_vdpa dma unmap error!");
> @@ -234,7 +246,6 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
>   * depends on the addnop().
>   */
>  static const MemoryListener vhost_vdpa_memory_listener = {
> -    .begin = vhost_vdpa_listener_begin,
>      .commit = vhost_vdpa_listener_commit,
>      .region_add = vhost_vdpa_listener_region_add,
>      .region_del = vhost_vdpa_listener_region_del,
> --
> 2.27.0
>
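
For readers who want the batching logic in isolation, below is a minimal, self-contained sketch of the pattern the patch implements: VHOST_IOTLB_BATCH_BEGIN is sent lazily, at most once per listener transaction and only once a non-skipped map/unmap is about to be issued, and VHOST_IOTLB_BATCH_END is sent at commit time only if a begin actually went out. The struct and function names below are simplified placeholders rather than the real QEMU symbols, and the printf calls stand in for the vhost_msg_v2 writes to device_fd shown in the diff above.

/*
 * Minimal sketch of the lazy batching pattern: BATCH_BEGIN is sent at most
 * once per memory-listener transaction, and only when a real map/unmap
 * happens; BATCH_END is sent at commit time only if BEGIN went out.
 * All names here are simplified placeholders, not the actual QEMU symbols.
 */
#include <stdbool.h>
#include <stdio.h>

struct batch_state {
    bool batching_enabled;  /* backend advertised VHOST_BACKEND_F_IOTLB_BATCH */
    bool begin_sent;        /* BATCH_BEGIN already written this transaction? */
};

static void send_batch_begin(void)
{
    /* stands in for write()-ing a VHOST_IOTLB_BATCH_BEGIN vhost_msg_v2 */
    printf("BATCH_BEGIN\n");
}

static void send_batch_end(void)
{
    /* stands in for write()-ing a VHOST_IOTLB_BATCH_END vhost_msg_v2 */
    printf("BATCH_END\n");
}

/* Called right before every dma_map/dma_unmap; emits BEGIN at most once. */
static void batch_begin_once(struct batch_state *s)
{
    if (s->batching_enabled && !s->begin_sent) {
        send_batch_begin();
    }
    s->begin_sent = true;
}

/* Called from the listener's commit callback; emits END only if needed. */
static void batch_commit(struct batch_state *s)
{
    if (!s->batching_enabled || !s->begin_sent) {
        return;
    }
    send_batch_end();
    s->begin_sent = false;
}

int main(void)
{
    struct batch_state s = { .batching_enabled = true, .begin_sent = false };

    /* Transaction with only skipped sections: nothing is written. */
    batch_commit(&s);

    /* Transaction with two real updates: exactly one BEGIN/END pair. */
    batch_begin_once(&s);   /* before the first dma_map */
    batch_begin_once(&s);   /* before the second dma_map, no duplicate BEGIN */
    batch_commit(&s);

    return 0;
}

Run under these assumptions, the first commit writes nothing at all, while the second transaction produces exactly one BEGIN/END pair around its two updates, which is the behavior the commit message describes.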



