Re: [Qemu-devel] [PATCH RESEND v10] vhost: used_memslots refactoring


From: Zhoujian (jay)
Subject: Re: [Qemu-devel] [PATCH RESEND v10] vhost: used_memslots refactoring
Date: Tue, 1 May 2018 08:18:16 +0000

Hi Michael and Igor,
I'm afraid this patch has been missed; could you help review it?

PS: To avoid side effects, some code redundancy remains between
vhost_user_set_used_memslots(), vhost_user_set_mem_table() and
vhost_user_set_mem_table_postcopy(); with my limited knowledge I haven't
found a good way to eliminate it yet (a rough sketch of one possible shared
helper follows). The counting of used_memslots for vhost-user and
vhost-kernel should be correct now, I think. Please let me know if there are
any remaining issues.
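
For illustration only, this is the kind of shared helper I have in mind
(vhost_user_count_fd_regions() is a hypothetical name, not part of this
patch); it simply lifts the fd-counting loop from
vhost_user_set_used_memslots() below, so the set_mem_table paths could call
it as well:

/* Hypothetical helper, not in this patch: count the regions in dev->mem
 * that are backed by a file descriptor.  The loop body is the same one
 * used by vhost_user_set_used_memslots() in the patch below.
 */
static unsigned int vhost_user_count_fd_regions(struct vhost_dev *dev)
{
    unsigned int fd_num = 0;
    int i;

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        ram_addr_t offset;
        MemoryRegion *mr;

        /* userspace_addr must be representable as a host pointer */
        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
                                     &offset);
        /* only fd-backed regions count against the vhost-user limit */
        if (memory_region_get_fd(mr) > 0) {
            fd_num++;
        }
    }
    return fd_num;
}

vhost_user_set_used_memslots() would then reduce to
"vhost_user_used_memslots = vhost_user_count_fd_regions(dev);", but the
set_mem_table paths also need the fds themselves while counting, which is
why I left the duplication in place for now.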

Regards,
Jay

> -----Original Message-----
> From: Zhoujian (jay)
> Sent: Tuesday, March 27, 2018 12:14 PM
> To: address@hidden
> Cc: address@hidden; address@hidden; Huangweidong (C)
> <address@hidden>; wangxin (U) <address@hidden>; Gonglei
> (Arei) <address@hidden>; Zhoujian (jay) <address@hidden>;
> Liuzhe (Ahriy, Euler) <address@hidden>
> Subject: [PATCH RESEND v10] vhost: used_memslots refactoring
> 
> used_memslots is shared by the vhost-kernel and vhost-user backends and is
> set to dev->mem->nregions, which is correct for vhost-kernel but not for
> vhost-user, since the latter only uses memory regions backed by a file
> descriptor. E.g. if a VM has a vhost-user NIC and 8 memory slots (the
> vhost-user memslot upper limit), hotplugging a new DIMM device fails because
> vhost_has_free_slot() finds no free slot left. It should succeed when only
> some of the memory slots are backed by a file descriptor, so count used
> memslots separately for vhost-user and vhost-kernel.
> 
> Signed-off-by: Igor Mammedov <address@hidden>
> Signed-off-by: Jay Zhou <address@hidden>
> Signed-off-by: Liuzhe <address@hidden>
> ---
> 
> v10:
>  - fix misaligned access to structures
>  - refine on setting used_memslots for vhost-user to
>    avoid side effect
>  - distinguish "has free memslots" and "out of memslots"
> v7 ... v9:
>  - rebased on the master
> v2 ... v6:
>  - delete the "used_memslots" global variable, and add it
>    for vhost-user and vhost-kernel separately
>  - refine the function, commit log
>  - used_memslots refactoring
> 
>  hw/virtio/vhost-backend.c         | 21 ++++++++++++++++++++-
>  hw/virtio/vhost-user.c            | 36 +++++++++++++++++++++++++++++++++---
>  hw/virtio/vhost.c                 | 13 ++++++-------
>  include/hw/virtio/vhost-backend.h |  8 ++++++--
>  4 files changed, 65 insertions(+), 13 deletions(-)
> 
> diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
> index 7f09efa..3539b5f 100644
> --- a/hw/virtio/vhost-backend.c
> +++ b/hw/virtio/vhost-backend.c
> @@ -15,6 +15,8 @@
>  #include "hw/virtio/vhost-backend.h"
>  #include "qemu/error-report.h"
> 
> +static unsigned int vhost_kernel_used_memslots;
> +
>  static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
>                               void *arg)
>  {
> @@ -62,6 +64,21 @@ static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
>      return limit;
>  }
> 
> +static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
> +{
> +    vhost_kernel_used_memslots = dev->mem->nregions;
> +}
> +
> +static bool vhost_kernel_has_free_memslots(struct vhost_dev *dev)
> +{
> +    return vhost_kernel_used_memslots < vhost_kernel_memslots_limit(dev);
> +}
> +
> +static bool vhost_kernel_out_of_memslots(struct vhost_dev *dev)
> +{
> +    return vhost_kernel_used_memslots > vhost_kernel_memslots_limit(dev);
> +}
> +
>  static int vhost_kernel_net_set_backend(struct vhost_dev *dev,
>                                          struct vhost_vring_file *file)
>  {
> @@ -237,7 +254,9 @@ static const VhostOps kernel_ops = {
>          .backend_type = VHOST_BACKEND_TYPE_KERNEL,
>          .vhost_backend_init = vhost_kernel_init,
>          .vhost_backend_cleanup = vhost_kernel_cleanup,
> -        .vhost_backend_memslots_limit = vhost_kernel_memslots_limit,
> +        .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
> +        .vhost_backend_has_free_memslots = vhost_kernel_has_free_memslots,
> +        .vhost_out_of_memslots = vhost_kernel_out_of_memslots,
>          .vhost_net_set_backend = vhost_kernel_net_set_backend,
>          .vhost_scsi_set_endpoint = vhost_kernel_scsi_set_endpoint,
>          .vhost_scsi_clear_endpoint = vhost_kernel_scsi_clear_endpoint,
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index 44aea5c..9691806 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -170,6 +170,8 @@ static VhostUserMsg m __attribute__ ((unused));
>  /* The version of the protocol we support */
>  #define VHOST_USER_VERSION    (0x1)
> 
> +static unsigned int vhost_user_used_memslots;
> +
>  struct vhost_user {
>      struct vhost_dev *dev;
>      CharBackend *chr;
> @@ -1289,9 +1291,35 @@ static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
>      return idx;
>  }
> 
> -static int vhost_user_memslots_limit(struct vhost_dev *dev)
> +static void vhost_user_set_used_memslots(struct vhost_dev *dev)
> +{
> +    int i, fd;
> +    unsigned int fd_num = 0;
> +
> +    for (i = 0; i < dev->mem->nregions; ++i) {
> +        struct vhost_memory_region *reg = dev->mem->regions + i;
> +        ram_addr_t offset;
> +        MemoryRegion *mr;
> +
> +        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
> +        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
> +                                     &offset);
> +        fd = memory_region_get_fd(mr);
> +        if (fd > 0) {
> +            fd_num++;
> +        }
> +    }
> +    vhost_user_used_memslots = fd_num;
> +}
> +
> +static bool vhost_user_has_free_memslots(struct vhost_dev *dev)
> +{
> +    return vhost_user_used_memslots < VHOST_MEMORY_MAX_NREGIONS;
> +}
> +
> +static bool vhost_user_out_of_memslots(struct vhost_dev *dev)
>  {
> -    return VHOST_MEMORY_MAX_NREGIONS;
> +    return vhost_user_used_memslots > VHOST_MEMORY_MAX_NREGIONS;
>  }
> 
>  static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
> @@ -1563,7 +1591,9 @@ const VhostOps user_ops = {
>          .backend_type = VHOST_BACKEND_TYPE_USER,
>          .vhost_backend_init = vhost_user_init,
>          .vhost_backend_cleanup = vhost_user_cleanup,
> -        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
> +        .vhost_set_used_memslots = vhost_user_set_used_memslots,
> +        .vhost_backend_has_free_memslots = vhost_user_has_free_memslots,
> +        .vhost_out_of_memslots = vhost_user_out_of_memslots,
>          .vhost_set_log_base = vhost_user_set_log_base,
>          .vhost_set_mem_table = vhost_user_set_mem_table,
>          .vhost_set_vring_addr = vhost_user_set_vring_addr,
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 250f886..706951a 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -44,20 +44,19 @@
>  static struct vhost_log *vhost_log;
>  static struct vhost_log *vhost_log_shm;
> 
> -static unsigned int used_memslots;
>  static QLIST_HEAD(, vhost_dev) vhost_devices =
>      QLIST_HEAD_INITIALIZER(vhost_devices);
> 
>  bool vhost_has_free_slot(void)
>  {
> -    unsigned int slots_limit = ~0U;
>      struct vhost_dev *hdev;
> 
>      QLIST_FOREACH(hdev, &vhost_devices, entry) {
> -        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
> -        slots_limit = MIN(slots_limit, r);
> +        if (!hdev->vhost_ops->vhost_backend_has_free_memslots(hdev)) {
> +            return false;
> +        }
>      }
> -    return slots_limit > used_memslots;
> +    return true;
>  }
> 
>  static void vhost_dev_sync_region(struct vhost_dev *dev,
> @@ -446,7 +445,6 @@ static void vhost_commit(MemoryListener *listener)
>                         dev->n_mem_sections * sizeof dev->mem->regions[0];
>      dev->mem = g_realloc(dev->mem, regions_size);
>      dev->mem->nregions = dev->n_mem_sections;
> -    used_memslots = dev->mem->nregions;
>      for (i = 0; i < dev->n_mem_sections; i++) {
>          struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
>          struct MemoryRegionSection *mrs = dev->mem_sections + i;
> @@ -458,6 +456,7 @@ static void vhost_commit(MemoryListener *listener)
>              mrs->offset_within_region;
>          cur_vmr->flags_padding   = 0;
>      }
> +    dev->vhost_ops->vhost_set_used_memslots(dev);
> 
>      if (!dev->started) {
>          goto out;
> @@ -1248,7 +1247,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      memory_listener_register(&hdev->memory_listener, &address_space_memory);
>      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
> 
> -    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +    if (hdev->vhost_ops->vhost_out_of_memslots(hdev)) {
>          error_report("vhost backend memory slots limit is less"
>                  " than current number of present memory slots");
>          r = -1;
> diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
> index 5dac61f..f7ab82d 100644
> --- a/include/hw/virtio/vhost-backend.h
> +++ b/include/hw/virtio/vhost-backend.h
> @@ -36,7 +36,9 @@ struct vhost_iotlb_msg;
> 
>  typedef int (*vhost_backend_init)(struct vhost_dev *dev, void *opaque);
>  typedef int (*vhost_backend_cleanup)(struct vhost_dev *dev);
> -typedef int (*vhost_backend_memslots_limit)(struct vhost_dev *dev);
> +typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
> +typedef bool (*vhost_backend_has_free_memslots)(struct vhost_dev *dev);
> +typedef bool (*vhost_out_of_memslots)(struct vhost_dev *dev);
> 
>  typedef int (*vhost_net_set_backend_op)(struct vhost_dev *dev,
>                                  struct vhost_vring_file *file);
> @@ -105,7 +107,9 @@ typedef struct VhostOps {
>      VhostBackendType backend_type;
>      vhost_backend_init vhost_backend_init;
>      vhost_backend_cleanup vhost_backend_cleanup;
> -    vhost_backend_memslots_limit vhost_backend_memslots_limit;
> +    vhost_set_used_memslots_op vhost_set_used_memslots;
> +    vhost_backend_has_free_memslots vhost_backend_has_free_memslots;
> +    vhost_out_of_memslots vhost_out_of_memslots;
>      vhost_net_set_backend_op vhost_net_set_backend;
>      vhost_net_set_mtu_op vhost_net_set_mtu;
>      vhost_scsi_set_endpoint_op vhost_scsi_set_endpoint;
> --
> 1.8.3.1
> 



