[RFC 5/7] vhost-vdpa: Add the iommufd support in the map/unmap function
From: Cindy Lu
Subject: [RFC 5/7] vhost-vdpa: Add the iommufd support in the map/unmap function
Date: Wed, 3 May 2023 17:13:35 +0800
1. Change the map/unmap functions to legacy_map/unmap.
2. Add a check for iommufd support:
   a) If iommufd is supported, call the iommufd-related functions.
   b) In order to reuse the kernel's IOTLB processing, still send the
      legacy-mode IOTLB message; the kernel will check for and skip the
      legacy IOTLB message if iommufd is enabled (see the sketch below).
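A condensed sketch of the resulting map path (mirroring the vhost_vdpa_dma_map()
hunk below; the lazy initialization of v->ops via
vdpa_backend_iommufd_ops_class_init() is omitted for brevity):

    int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                           hwaddr size, void *vaddr, bool readonly)
    {
        /* Map through the iommufd backend first when it is enabled. */
        if (v->enable_iommufd) {
            v->ops->dma_map(v->dev, asid, iova, size, vaddr, readonly);
        }
        /*
         * Still send the legacy IOTLB message so the kernel's IOTLB
         * processing is reused; the kernel skips it when iommufd is active.
         */
        return vhost_vdpa_legacy_dma_map(v, asid, iova, size, vaddr, readonly);
    }

The unmap path follows the same pattern with vhost_vdpa_legacy_dma_unmap().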
Signed-off-by: Cindy Lu <lulu@redhat.com>
---
hw/virtio/vhost-vdpa.c | 56 ++++++++++++++++++++++++++++++----
include/hw/virtio/vhost-vdpa.h | 24 +++++++++++++++
2 files changed, 74 insertions(+), 6 deletions(-)
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 542e003101..85240926b2 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -26,6 +26,7 @@
#include "cpu.h"
#include "trace.h"
#include "qapi/error.h"
+#include "sysemu/iommufd.h"
/*
* Return one past the end of the end of section. Be careful with uint64_t
@@ -76,8 +77,9 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
* The caller must set asid = 0 if the device does not support asid.
* This is not an ABI break since it is set to 0 by the initializer anyway.
*/
-int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
- hwaddr size, void *vaddr, bool readonly)
+
+int vhost_vdpa_legacy_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+ hwaddr size, void *vaddr, bool readonly)
{
struct vhost_msg_v2 msg = {};
int fd = v->device_fd;
@@ -103,13 +105,32 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
return ret;
}
+int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+ hwaddr size, void *vaddr, bool readonly)
+{
+ struct vhost_dev *dev = v->dev;
+
+ if ((v->enable_iommufd) && (v->ops == NULL)) {
+ vdpa_backend_iommufd_ops_class_init(v);
+ }
+
+ struct vdpa_iommu_backend_ops *ops = v->ops;
+    /* In order to reuse the kernel's IOTLB processing, still send the
+     * legacy-mode mapping message; the kernel will skip the legacy
+     * mapping when iommufd is enabled. */
+ if (v->enable_iommufd) {
+ ops->dma_map(dev, asid, iova, size, vaddr, readonly);
+ }
+ return vhost_vdpa_legacy_dma_map(v, asid, iova, size, vaddr, readonly);
+}
/*
* The caller must set asid = 0 if the device does not support asid.
* This is not an ABI break since it is set to 0 by the initializer anyway.
*/
-int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
- hwaddr size)
+
+int vhost_vdpa_legacy_dma_unmap(struct vhost_vdpa *v, uint32_t asid,
+ hwaddr iova, hwaddr size)
{
struct vhost_msg_v2 msg = {};
int fd = v->device_fd;
@@ -132,6 +153,26 @@ int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
return ret;
}
+int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+ hwaddr size)
+{
+ struct vhost_dev *dev = v->dev;
+
+ if ((v->enable_iommufd) && (v->ops == NULL)) {
+ vdpa_backend_iommufd_ops_class_init(v);
+ }
+
+
+    /* In order to reuse the kernel's IOTLB processing, still send the
+     * legacy-mode mapping message; the kernel will skip the legacy
+     * mapping when iommufd is enabled. */
+ if (v->enable_iommufd) {
+ struct vdpa_iommu_backend_ops *ops = v->ops;
+
+ ops->dma_unmap(dev, asid, iova, size);
+ }
+ return vhost_vdpa_legacy_dma_unmap(v, asid, iova, size);
+}
static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
@@ -423,13 +464,14 @@ static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
v->shadow_vqs = g_steal_pointer(&shadow_vqs);
}
-
+int g_iommufd;
static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
struct vhost_vdpa *v;
assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
trace_vhost_vdpa_init(dev, opaque);
int ret;
+ printf("[%s] %d called\n", __func__, __LINE__);
/*
* Similar to VFIO, we end up pinning all guest memory and have to
@@ -580,7 +622,9 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
memory_listener_unregister(&v->listener);
vhost_vdpa_svq_cleanup(dev);
-
+ if (vhost_vdpa_first_dev(dev)) {
+ v->ops->detach_device(v);
+ }
dev->opaque = NULL;
ram_block_discard_disable(false);
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 309d4ffc70..aa0e3ed65b 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -55,6 +55,10 @@ typedef struct vhost_vdpa {
void *shadow_vq_ops_opaque;
struct vhost_dev *dev;
VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
+ /* iommufd related */
+ struct vdpa_iommu_backend_ops *ops;
+ bool enable_iommufd;
+
} VhostVDPA;
@@ -76,9 +80,29 @@ typedef struct vdpa_iommufd {
int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);
+int vhost_vdpa_legacy_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+ hwaddr size, void *vaddr, bool readonly);
+int vhost_vdpa_legacy_dma_unmap(struct vhost_vdpa *v, uint32_t asid,
+ hwaddr iova, hwaddr size);
+
int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
hwaddr size, void *vaddr, bool readonly);
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
hwaddr size);
+struct vdpa_iommu_backend_ops {
+ /*< private >*/
+ ObjectClass parent_class;
+ int (*dma_map)(struct vhost_dev *dev, uint32_t asid, hwaddr iova,
+ hwaddr size, void *vaddr, bool readonly);
+ int (*dma_unmap)(struct vhost_dev *dev, uint32_t asid, hwaddr iova,
+ hwaddr size);
+ int (*attach_device)(struct vhost_vdpa *dev, AddressSpace *as,
+ Error **errp);
+ void (*detach_device)(struct vhost_vdpa *dev);
+ int (*reset)(VDPAIOMMUFDState *vdpa_iommufd);
+};
+
+void vdpa_backend_iommufd_ops_class_init(struct vhost_vdpa *v);
+
#endif
--
2.34.3
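For reference, the header change above also declares a vdpa_iommu_backend_ops
vtable and vdpa_backend_iommufd_ops_class_init(). A minimal sketch of how that
init function could wire up the vtable is shown here; the stub callbacks are
purely illustrative placeholders (the real iommufd callbacks are not part of
this patch and are expected to come with the vhost-vdpa-iommufd patch later in
the series):

    /* Illustrative stubs only: stand-ins for the real iommufd callbacks. */
    static int stub_dma_map(struct vhost_dev *dev, uint32_t asid, hwaddr iova,
                            hwaddr size, void *vaddr, bool readonly)
    {
        return 0; /* a real backend would issue the iommufd map here */
    }

    static int stub_dma_unmap(struct vhost_dev *dev, uint32_t asid, hwaddr iova,
                              hwaddr size)
    {
        return 0; /* a real backend would issue the iommufd unmap here */
    }

    static struct vdpa_iommu_backend_ops iommufd_backend_ops = {
        .dma_map   = stub_dma_map,
        .dma_unmap = stub_dma_unmap,
        /* .attach_device, .detach_device and .reset would be set likewise. */
    };

    void vdpa_backend_iommufd_ops_class_init(struct vhost_vdpa *v)
    {
        /* Hand the per-device vhost_vdpa its iommufd ops table. */
        v->ops = &iommufd_backend_ops;
    }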