[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [PATCH v8 2/2] vhost: used_memslots refactoring
From: Michael S. Tsirkin
Subject: Re: [Qemu-devel] [PATCH v8 2/2] vhost: used_memslots refactoring
Date: Thu, 1 Mar 2018 18:16:48 +0200
On Tue, Feb 27, 2018 at 03:10:05PM +0800, Jay Zhou wrote:
> Used_memslots is shared by vhost kernel and user, it is equal to
> dev->mem->nregions, which is correct for vhost kernel, but not for
> vhost user, the latter one uses memory regions that have file
> descriptor. E.g. a VM has a vhost-user NIC and 8 (the vhost-user memslot
> upper limit) memory slots; hotplugging a new DIMM device will fail
> because vhost_has_free_slot() finds no free slot left. It
> should succeed if only part of the memory slots have a file
> descriptor, so set the used memslots for vhost-user and
> vhost-kernel separately.
>
> Signed-off-by: Igor Mammedov <address@hidden>
> Signed-off-by: Jay Zhou <address@hidden>
> Signed-off-by: Liuzhe <address@hidden>
make check fails with this patch, I dropped it for now.
> ---
> hw/virtio/vhost-backend.c | 15 +++++++-
> hw/virtio/vhost-user.c | 77
> ++++++++++++++++++++++++++-------------
> hw/virtio/vhost.c | 13 +++----
> include/hw/virtio/vhost-backend.h | 6 ++-
> 4 files changed, 75 insertions(+), 36 deletions(-)
>
> diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
> index 7f09efa..59def69 100644
> --- a/hw/virtio/vhost-backend.c
> +++ b/hw/virtio/vhost-backend.c
> @@ -15,6 +15,8 @@
> #include "hw/virtio/vhost-backend.h"
> #include "qemu/error-report.h"
>
> +static unsigned int vhost_kernel_used_memslots;
> +
> static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int
> request,
> void *arg)
> {
> @@ -62,6 +64,11 @@ static int vhost_kernel_memslots_limit(struct vhost_dev
> *dev)
> return limit;
> }
>
> +static bool vhost_kernel_has_free_memslots(struct vhost_dev *dev)
> +{
> + return vhost_kernel_used_memslots < vhost_kernel_memslots_limit(dev);
> +}
> +
> static int vhost_kernel_net_set_backend(struct vhost_dev *dev,
> struct vhost_vring_file *file)
> {
> @@ -233,11 +240,16 @@ static void vhost_kernel_set_iotlb_callback(struct
> vhost_dev *dev,
> qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
> }
>
> +static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
> +{
> + vhost_kernel_used_memslots = dev->mem->nregions;
> +}
> +
> static const VhostOps kernel_ops = {
> .backend_type = VHOST_BACKEND_TYPE_KERNEL,
> .vhost_backend_init = vhost_kernel_init,
> .vhost_backend_cleanup = vhost_kernel_cleanup,
> - .vhost_backend_memslots_limit = vhost_kernel_memslots_limit,
> + .vhost_backend_has_free_memslots = vhost_kernel_has_free_memslots,
> .vhost_net_set_backend = vhost_kernel_net_set_backend,
> .vhost_scsi_set_endpoint = vhost_kernel_scsi_set_endpoint,
> .vhost_scsi_clear_endpoint = vhost_kernel_scsi_clear_endpoint,
> @@ -264,6 +276,7 @@ static const VhostOps kernel_ops = {
> #endif /* CONFIG_VHOST_VSOCK */
> .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
> .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
> + .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
> };
>
> int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType
> backend_type)
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index 6eb9798..f732c80 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -147,6 +147,8 @@ static VhostUserMsg m __attribute__ ((unused));
> /* The version of the protocol we support */
> #define VHOST_USER_VERSION (0x1)
>
> +static bool vhost_user_free_memslots = true;
> +
> struct vhost_user {
> CharBackend *chr;
> int slave_fd;
> @@ -314,12 +316,43 @@ static int vhost_user_set_log_base(struct vhost_dev
> *dev, uint64_t base,
> return 0;
> }
>
> +static int vhost_user_prepare_msg(struct vhost_dev *dev, VhostUserMemory
> *mem,
> + int *fds)
> +{
> + int i, fd;
> +
> + vhost_user_free_memslots = true;
> + for (i = 0, mem->nregions = 0; i < dev->mem->nregions; ++i) {
> + struct vhost_memory_region *reg = dev->mem->regions + i;
> + ram_addr_t offset;
> + MemoryRegion *mr;
> +
> + assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
> + mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
> + &offset);
> + fd = memory_region_get_fd(mr);
> + if (fd > 0) {
> + if (mem->nregions == VHOST_MEMORY_MAX_NREGIONS) {
> + vhost_user_free_memslots = false;
> + return -1;
> + }
> +
> + mem->regions[mem->nregions].userspace_addr = reg->userspace_addr;
> + mem->regions[mem->nregions].memory_size = reg->memory_size;
> + mem->regions[mem->nregions].guest_phys_addr =
> reg->guest_phys_addr;
> + mem->regions[mem->nregions].mmap_offset = offset;
> + fds[mem->nregions++] = fd;
> + }
> + }
> +
> + return 0;
> +}
> +
> static int vhost_user_set_mem_table(struct vhost_dev *dev,
> struct vhost_memory *mem)
> {
> int fds[VHOST_MEMORY_MAX_NREGIONS];
> - int i, fd;
> - size_t fd_num = 0;
> + size_t fd_num;
> bool reply_supported = virtio_has_feature(dev->protocol_features,
>
> VHOST_USER_PROTOCOL_F_REPLY_ACK);
>
> @@ -332,29 +365,12 @@ static int vhost_user_set_mem_table(struct vhost_dev
> *dev,
> msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
> }
>
> - for (i = 0; i < dev->mem->nregions; ++i) {
> - struct vhost_memory_region *reg = dev->mem->regions + i;
> - ram_addr_t offset;
> - MemoryRegion *mr;
> -
> - assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
> - mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
> - &offset);
> - fd = memory_region_get_fd(mr);
> - if (fd > 0) {
> - if (fd_num == VHOST_MEMORY_MAX_NREGIONS) {
> - error_report("Failed preparing vhost-user memory table msg");
> - return -1;
> - }
> - msg.payload.memory.regions[fd_num].userspace_addr =
> reg->userspace_addr;
> - msg.payload.memory.regions[fd_num].memory_size =
> reg->memory_size;
> - msg.payload.memory.regions[fd_num].guest_phys_addr =
> reg->guest_phys_addr;
> - msg.payload.memory.regions[fd_num].mmap_offset = offset;
> - fds[fd_num++] = fd;
> - }
> + if (vhost_user_prepare_msg(dev, &msg.payload.memory, fds) < 0) {
> + error_report("Failed preparing vhost-user memory table msg");
> + return -1;
> }
>
> - msg.payload.memory.nregions = fd_num;
> + fd_num = msg.payload.memory.nregions;
>
> if (!fd_num) {
> error_report("Failed initializing vhost-user memory map, "
> @@ -870,9 +886,9 @@ static int vhost_user_get_vq_index(struct vhost_dev *dev,
> int idx)
> return idx;
> }
>
> -static int vhost_user_memslots_limit(struct vhost_dev *dev)
> +static bool vhost_user_has_free_memslots(struct vhost_dev *dev)
> {
> - return VHOST_MEMORY_MAX_NREGIONS;
> + return vhost_user_free_memslots;
> }
>
> static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
> @@ -1054,11 +1070,19 @@ static int vhost_user_set_config(struct vhost_dev
> *dev, const uint8_t *data,
> return 0;
> }
>
> +static void vhost_user_set_used_memslots(struct vhost_dev *dev)
> +{
> + int fds[VHOST_MEMORY_MAX_NREGIONS];
> + VhostUserMsg msg;
> +
> + vhost_user_prepare_msg(dev, &msg.payload.memory, fds);
> +}
> +
> const VhostOps user_ops = {
> .backend_type = VHOST_BACKEND_TYPE_USER,
> .vhost_backend_init = vhost_user_init,
> .vhost_backend_cleanup = vhost_user_cleanup,
> - .vhost_backend_memslots_limit = vhost_user_memslots_limit,
> + .vhost_backend_has_free_memslots = vhost_user_has_free_memslots,
> .vhost_set_log_base = vhost_user_set_log_base,
> .vhost_set_mem_table = vhost_user_set_mem_table,
> .vhost_set_vring_addr = vhost_user_set_vring_addr,
> @@ -1082,4 +1106,5 @@ const VhostOps user_ops = {
> .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
> .vhost_get_config = vhost_user_get_config,
> .vhost_set_config = vhost_user_set_config,
> + .vhost_set_used_memslots = vhost_user_set_used_memslots,
> };
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 4a583a3..7f17de9 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -44,20 +44,19 @@
> static struct vhost_log *vhost_log;
> static struct vhost_log *vhost_log_shm;
>
> -static unsigned int used_memslots;
> static QLIST_HEAD(, vhost_dev) vhost_devices =
> QLIST_HEAD_INITIALIZER(vhost_devices);
>
> bool vhost_has_free_slot(void)
> {
> - unsigned int slots_limit = ~0U;
> struct vhost_dev *hdev;
>
> QLIST_FOREACH(hdev, &vhost_devices, entry) {
> - unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
> - slots_limit = MIN(slots_limit, r);
> + if (!hdev->vhost_ops->vhost_backend_has_free_memslots(hdev)) {
> + return false;
> + }
> }
> - return slots_limit > used_memslots;
> + return true;
> }
>
> static void vhost_dev_sync_region(struct vhost_dev *dev,
> @@ -442,7 +441,7 @@ static void vhost_commit(MemoryListener *listener)
> dev->n_mem_sections * sizeof dev->mem->regions[0];
> dev->mem = g_realloc(dev->mem, regions_size);
> dev->mem->nregions = dev->n_mem_sections;
> - used_memslots = dev->mem->nregions;
> + dev->vhost_ops->vhost_set_used_memslots(dev);
> for (i = 0; i < dev->n_mem_sections; i++) {
> struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
> struct MemoryRegionSection *mrs = dev->mem_sections + i;
> @@ -1186,7 +1185,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> memory_listener_register(&hdev->memory_listener, &address_space_memory);
> QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
>
> - if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev))
> {
> + if (!hdev->vhost_ops->vhost_backend_has_free_memslots(hdev)) {
> error_report("vhost backend memory slots limit is less"
> " than current number of present memory slots");
> r = -1;
> diff --git a/include/hw/virtio/vhost-backend.h
> b/include/hw/virtio/vhost-backend.h
> index 592254f..2eac224 100644
> --- a/include/hw/virtio/vhost-backend.h
> +++ b/include/hw/virtio/vhost-backend.h
> @@ -36,7 +36,7 @@ struct vhost_iotlb_msg;
>
> typedef int (*vhost_backend_init)(struct vhost_dev *dev, void *opaque);
> typedef int (*vhost_backend_cleanup)(struct vhost_dev *dev);
> -typedef int (*vhost_backend_memslots_limit)(struct vhost_dev *dev);
> +typedef bool (*vhost_backend_has_free_memslots)(struct vhost_dev *dev);
>
> typedef int (*vhost_net_set_backend_op)(struct vhost_dev *dev,
> struct vhost_vring_file *file);
> @@ -94,12 +94,13 @@ typedef int (*vhost_set_config_op)(struct vhost_dev *dev,
> const uint8_t *data,
> uint32_t flags);
> typedef int (*vhost_get_config_op)(struct vhost_dev *dev, uint8_t *config,
> uint32_t config_len);
> +typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
>
> typedef struct VhostOps {
> VhostBackendType backend_type;
> vhost_backend_init vhost_backend_init;
> vhost_backend_cleanup vhost_backend_cleanup;
> - vhost_backend_memslots_limit vhost_backend_memslots_limit;
> + vhost_backend_has_free_memslots vhost_backend_has_free_memslots;
> vhost_net_set_backend_op vhost_net_set_backend;
> vhost_net_set_mtu_op vhost_net_set_mtu;
> vhost_scsi_set_endpoint_op vhost_scsi_set_endpoint;
> @@ -130,6 +131,7 @@ typedef struct VhostOps {
> vhost_send_device_iotlb_msg_op vhost_send_device_iotlb_msg;
> vhost_get_config_op vhost_get_config;
> vhost_set_config_op vhost_set_config;
> + vhost_set_used_memslots_op vhost_set_used_memslots;
> } VhostOps;
>
> extern const VhostOps user_ops;
> --
> 1.8.3.1
>
- Re: [Qemu-devel] [PATCH v8 2/2] vhost: used_memslots refactoring,
Michael S. Tsirkin <=