Re: [PATCH] vhost-user: add separate memslot counter for vhost-user


From: Raphael Norwitz
Subject: Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
Date: Mon, 14 Sep 2020 21:27:32 -0400

A few more comments mixed in below.

Once these are addressed I'd support this change from the vhost-user
side. We should get opinions from other maintainers though,
particularly for the kernel backend and vhost-user-net changes.

On Tue, Sep 8, 2020 at 2:11 AM Jiajun Chen <chenjiajun8@huawei.com> wrote:
>
> used_memslots is currently set to dev->mem->nregions. That is correct
> for the vhost kernel backend, but not for vhost-user, which only uses
> the memory regions that have a file descriptor; not every memory
> region has one.
> This matters in practice: e.g. if used_memslots is 8 but vhost-user
> can only use 5 memory slots, hot-plugging new RAM fails because
> vhost_has_free_slot() returns false, even though the hot-plug would
> actually be safe.
>
> --
> ChangeList:
> v2:
> -eliminate the unneeded used_memslots_exceeded variable and used_memslots_is_exceeded() API
>
> v1:
> -vhost-user: add separate memslot counter for vhost-user
>
> Signed-off-by: Jiajun Chen <chenjiajun8@huawei.com>
> Signed-off-by: Jianjay Zhou <jianjay.zhou@huawei.com>
> ---
>  hw/virtio/vhost-backend.c         | 14 ++++++++++++
>  hw/virtio/vhost-user.c            | 28 +++++++++++++++++++++++
>  hw/virtio/vhost.c                 | 37 +++++++++++++++++++++++--------
>  include/hw/virtio/vhost-backend.h |  5 +++++
>  net/vhost-user.c                  |  7 ++++++
>  5 files changed, 82 insertions(+), 9 deletions(-)
>
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index 31231218dc..ec46a745fa 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -232,6 +232,7 @@ static VhostUserMsg m __attribute__ ((unused));
>
>  /* The version of the protocol we support */
>  #define VHOST_USER_VERSION    (0x1)
> +static unsigned int vhost_user_used_memslots;

I worry that using a global static value may introduce a lot of edge
cases which may be difficult to account for. Can we store this state
per vhost_user dev, either in the VhostUserState or struct vhost_user?
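
Something like the following is what I have in mind -- an untested
sketch, with the field name purely illustrative. Note it would also
mean changing the .vhost_get_used_memslots hook to take the vhost_dev,
which fits vhost_has_free_slot() naturally since it already iterates
over the device list:

    struct vhost_user {
        struct vhost_dev *dev;
        /* ... existing fields unchanged ... */
        /* per-device count of fd-backed regions (name illustrative) */
        unsigned int used_memslots;
    };

    static void vhost_user_set_used_memslots(struct vhost_dev *dev)
    {
        /* for the user backend, dev->opaque is the struct vhost_user */
        struct vhost_user *u = dev->opaque;
        unsigned int counter = 0;
        int i;

        for (i = 0; i < dev->mem->nregions; ++i) {
            struct vhost_memory_region *reg = dev->mem->regions + i;
            ram_addr_t offset;
            MemoryRegion *mr;
            int fd;

            mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
            if (mr && fd > 0) {
                counter++;
            }
        }
        u->used_memslots = counter;
    }

    static unsigned int vhost_user_get_used_memslots(struct vhost_dev *dev)
    {
        struct vhost_user *u = dev->opaque;

        return u->used_memslots;
    }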

>
>  struct vhost_user {
>      struct vhost_dev *dev;
> @@ -2354,6 +2355,31 @@ void vhost_user_cleanup(VhostUserState *user)
>      user->chr = NULL;
>  }
>
> +static void vhost_user_set_used_memslots(struct vhost_dev *dev)
> +{
> +    unsigned int counter = 0;
> +    int i;
> +
> +    for (i = 0; i < dev->mem->nregions; ++i) {
> +        struct vhost_memory_region *reg = dev->mem->regions + i;
> +        ram_addr_t offset;
> +        MemoryRegion *mr;
> +        int fd;
> +
> +        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);

This assert is redundant - vhost_user_get_mr_data already checks this.
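
For reference, the helper already performs the same check on entry; as
I recall it looks roughly like this:

    static MemoryRegion *vhost_user_get_mr_data(uint64_t addr,
                                                ram_addr_t *offset, int *fd)
    {
        MemoryRegion *mr;

        /* the same assertion as the one above */
        assert((uintptr_t)addr == addr);
        mr = memory_region_from_host((void *)(uintptr_t)addr, offset);
        *fd = memory_region_get_fd(mr);

        return mr;
    }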

> +        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
> +        if (mr && fd > 0) {
> +            counter++;
> +        }
> +    }
> +    vhost_user_used_memslots = counter;
> +}
> +
> +static unsigned int vhost_user_get_used_memslots(void)
> +{
> +    return vhost_user_used_memslots;
> +}
> +
>  const VhostOps user_ops = {
>          .backend_type = VHOST_BACKEND_TYPE_USER,
>          .vhost_backend_init = vhost_user_backend_init,
> @@ -2387,4 +2413,6 @@ const VhostOps user_ops = {
>          .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
>          .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
>          .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
> +        .vhost_set_used_memslots = vhost_user_set_used_memslots,
> +        .vhost_get_used_memslots = vhost_user_get_used_memslots,
>  };
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 1a1384e7a6..b8d617d24c 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -45,20 +45,20 @@
>  static struct vhost_log *vhost_log;
>  static struct vhost_log *vhost_log_shm;
>
> -static unsigned int used_memslots;
>  static QLIST_HEAD(, vhost_dev) vhost_devices =
>      QLIST_HEAD_INITIALIZER(vhost_devices);
>
>  bool vhost_has_free_slot(void)
>  {
> -    unsigned int slots_limit = ~0U;
>      struct vhost_dev *hdev;
>
>      QLIST_FOREACH(hdev, &vhost_devices, entry) {
> -        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
> -        slots_limit = MIN(slots_limit, r);
> +        if (hdev->vhost_ops->vhost_get_used_memslots() >=
> +            hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +            return false;
> +        }
>      }
> -    return slots_limit > used_memslots;
> +    return true;
>  }
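
For context on why this check matters: vhost_has_free_slot() is what
gates memory device hot-plug, roughly like this on the caller side
(paraphrased from hw/mem/memory-device.c):

    /* the memory hot-plug path refuses a new DIMM if any registered
     * vhost backend reports that it has no free slots left */
    if (!vhost_has_free_slot()) {
        error_setg(errp, "a used vhost backend has no free memory slots left");
        return;
    }

With the old global counter a vhost-user device was charged for every
region, fd-backed or not, so this check could fail even though the
backend itself still had room -- the exact scenario from the commit
message.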
>
>  static void vhost_dev_sync_region(struct vhost_dev *dev,
> @@ -502,7 +502,6 @@ static void vhost_commit(MemoryListener *listener)
>                         dev->n_mem_sections * sizeof dev->mem->regions[0];
>      dev->mem = g_realloc(dev->mem, regions_size);
>      dev->mem->nregions = dev->n_mem_sections;
> -    used_memslots = dev->mem->nregions;
>      for (i = 0; i < dev->n_mem_sections; i++) {
>          struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
>          struct MemoryRegionSection *mrs = dev->mem_sections + i;
> @@ -678,6 +677,7 @@ static void vhost_region_add_section(struct vhost_dev *dev,
>          dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
>          memory_region_ref(section->mr);
>      }
> +    dev->vhost_ops->vhost_set_used_memslots(dev);
>  }
>
>  /* Used for both add and nop callbacks */
> @@ -693,6 +693,17 @@ static void vhost_region_addnop(MemoryListener *listener,
>      vhost_region_add_section(dev, section);
>  }
>
> +static void vhost_region_del(MemoryListener *listener,
> +                             MemoryRegionSection *section)
> +{
> +    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
> +                                         memory_listener);
> +    if (!vhost_section(dev, section)) {
> +        return;
> +    }
> +    dev->vhost_ops->vhost_set_used_memslots(dev);
> +}
> +
>  static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>  {
>      struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
> @@ -1300,6 +1311,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      hdev->memory_listener = (MemoryListener) {
>          .begin = vhost_begin,
>          .commit = vhost_commit,
> +        .region_del = vhost_region_del,
>          .region_add = vhost_region_addnop,
>          .region_nop = vhost_region_addnop,
>          .log_start = vhost_log_start,
> @@ -1346,9 +1358,16 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      memory_listener_register(&hdev->memory_listener, &address_space_memory);
>      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
>
> -    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> -        error_report("vhost backend memory slots limit is less"
> -                " than current number of present memory slots");
> +    /*
> +     * If the VM was started without any vhost device, then on the
> +     * first vhost device hot-plug (up to which point
> +     * vhost_get_used_memslots() has always returned 0) this limit
> +     * needs to be double-checked here

NIT - There should be a period at the end of the sentence in this comment.


> +     */
> +    if (hdev->vhost_ops->vhost_get_used_memslots() >
> +        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +        error_report("vhost backend memory slots limit is less than"
> +                     " current number of present memory slots");
>          r = -1;
>          if (busyloop_timeout) {
>              goto fail_busyloop;


