From: Eugenio Pérez
Subject: [RFC PATCH v4 19/20] vhost: Use a tree to store memory mappings
Date: Fri, 1 Oct 2021 09:06:02 +0200

Track the memory translations of devices with an IOMMU (all vhost-vdpa
devices at the moment). It does not work yet if the device has
restrictions on its iova range.

Updates to the tree are protected by the BQL; each one always runs from
the main event loop context.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
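For context, the mapping path this patch adds boils down to the sketch
below. It is not part of the diff; the helper name svq_map_buffer is
illustrative, and it assumes the VhostDMAMap layout and the
vhost_iova_tree_alloc() semantics introduced earlier in this series,
where the tree allocates a free iova range and stores it in map->iova.

    /* Sketch only: how a host buffer gets a device-visible iova. */
    static int svq_map_buffer(struct vhost_vdpa *v, void *host, size_t len)
    {
        VhostDMAMap map = {
            .translated_addr = host,
            .size = len - 1,            /* .size counts the last byte */
            .perm = VHOST_ACCESS_RW,
        };
        int r = vhost_iova_tree_alloc(v->iova_map, &map); /* fills map.iova */

        if (r != VHOST_DMA_MAP_OK) {
            return -1;
        }

        /* The device must now use map.iova to reach `host` */
        return vhost_vdpa_dma_map(v, map.iova, map.size,
                                  map.translated_addr, false);
    }
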
 include/hw/virtio/vhost-vdpa.h |  3 ++
 hw/virtio/vhost-vdpa.c         | 59 ++++++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+)

diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 9044ae694b..7353e36884 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -15,6 +15,7 @@
 #include <gmodule.h>
 
 #include "qemu/queue.h"
+#include "hw/virtio/vhost-iova-tree.h"
 #include "hw/virtio/virtio.h"
 
 typedef struct VhostVDPAHostNotifier {
@@ -29,6 +30,8 @@ typedef struct vhost_vdpa {
     uint64_t host_features;
     uint64_t guest_features;
     bool shadow_vqs_enabled;
+    /* IOVA mapping used by Shadow Virtqueue */
+    VhostIOVATree *iova_map;
     GPtrArray *shadow_vqs;
     struct vhost_dev *dev;
     QLIST_ENTRY(vhost_vdpa) entry;
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index bb7010ddb5..a9c680b487 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -395,6 +395,7 @@ static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
         vhost_svq_stop(dev, idx, g_ptr_array_index(v->shadow_vqs, idx));
     }
     g_ptr_array_free(v->shadow_vqs, true);
+    g_clear_pointer(&v->iova_map, vhost_iova_tree_unref);
 }
 
 static int vhost_vdpa_cleanup(struct vhost_dev *dev)
@@ -753,6 +754,22 @@ static bool  vhost_vdpa_force_iommu(struct vhost_dev *dev)
     return true;
 }
 
+/**
+ * Maps QEMU vaddr memory to the device in a suitable way for shadow virtqueue:
+ * - It always references QEMU memory addresses, not the guest's memory.
+ * - TODO It is always within the device's iova range.
+ *
+ * The translated address is returned in map->iova.
+ */
+static int vhost_vdpa_svq_map(struct vhost_vdpa *v, VhostDMAMap *map)
+{
+    int r = vhost_iova_tree_alloc(v->iova_map, map);
+    assert(r == VHOST_DMA_MAP_OK);
+
+    return vhost_vdpa_dma_map(v, map->iova, map->size, map->translated_addr,
+                              false);
+}
+
 static int vhost_vdpa_vring_pause(struct vhost_dev *dev)
 {
     int r;
@@ -771,6 +788,7 @@ static int vhost_vdpa_vring_pause(struct vhost_dev *dev)
  */
 static bool vhost_vdpa_svq_start_vq(struct vhost_dev *dev, unsigned idx)
 {
+    VhostDMAMap device_region, driver_region;
     struct vhost_vdpa *v = dev->opaque;
     VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, idx);
     EventNotifier *vhost_call_notifier = vhost_svq_get_svq_call_notifier(svq);
@@ -789,6 +807,33 @@ static bool vhost_vdpa_svq_start_vq(struct vhost_dev *dev, unsigned idx)
     bool b;
 
     vhost_svq_get_vring_addr(svq, &addr);
+    driver_region = (VhostDMAMap) {
+        .translated_addr = (void *)addr.desc_user_addr,
+
+        /*
+         * DMAMap.size includes the last byte of the range, while sizeof
+         * marks one past it. Subtract one byte to make them match.
+         */
+        .size = vhost_svq_driver_area_size(svq) - 1,
+        .perm = VHOST_ACCESS_RO,
+    };
+    device_region = (VhostDMAMap) {
+        .translated_addr = (void *)addr.used_user_addr,
+        .size = vhost_svq_device_area_size(svq) - 1,
+        .perm = VHOST_ACCESS_RW,
+    };
+
+    r = vhost_vdpa_svq_map(v, &driver_region);
+    assert(r == 0);
+    r = vhost_vdpa_svq_map(v, &device_region);
+    assert(r == 0);
+
+    /* Expose IOVA addresses to vDPA device */
+    addr.avail_user_addr = driver_region.iova + addr.avail_user_addr
+                           - addr.desc_user_addr;
+    addr.desc_user_addr = driver_region.iova;
+    addr.used_user_addr = device_region.iova;
+
     r = vhost_vdpa_set_vring_addr(dev, &addr);
     if (unlikely(r)) {
         error_report("vhost_set_vring_addr for shadow vq failed");
@@ -822,6 +867,17 @@ static bool vhost_vdpa_svq_start_vq(struct vhost_dev *dev, unsigned idx)
     return true;
 }
 
+/**
+ * Enable or disable shadow virtqueue mode in a vhost vdpa device.
+ *
+ * This function is idempotent: calling it many times with the same value of
+ * enable will simply return success.
+ *
+ * @v       The vhost vdpa device
+ * @enable  True to enable the shadow virtqueues, false to disable them.
+ *
+ * Returns the number of queues changed.
+ */
 static unsigned vhost_vdpa_enable_svq(struct vhost_vdpa *v, bool enable)
 {
     struct vhost_dev *hdev = v->dev;
@@ -833,6 +889,8 @@ static unsigned vhost_vdpa_enable_svq(struct vhost_vdpa *v, bool enable)
     }
 
     if (enable) {
+        v->iova_map = vhost_iova_tree_new();
+
         /* Allocate resources */
         assert(v->shadow_vqs->len == 0);
         for (n = 0; n < hdev->nvqs; ++n) {
@@ -907,6 +965,7 @@ static unsigned vhost_vdpa_enable_svq(struct vhost_vdpa *v, bool enable)
     if (!enable) {
         /* Resources cleanup */
         g_ptr_array_set_size(v->shadow_vqs, 0);
+        g_clear_pointer(&v->iova_map, vhost_iova_tree_unref);
     }
 
     return n;
-- 
2.27.0
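
One detail of vhost_vdpa_svq_start_vq() that may be worth spelling out:
the descriptor and avail rings live in the single driver region, so only
the region base receives a fresh iova; the avail ring keeps its offset
within the region. A minimal sketch of that arithmetic follows (the
helper name region_to_iova is illustrative, not in the patch):

    /*
     * Translate a host address that falls inside an already-mapped
     * region into the device-visible iova. Offsets within the region
     * are preserved by the mapping.
     */
    static uint64_t region_to_iova(const VhostDMAMap *region,
                                   uint64_t region_host_base,
                                   uint64_t host_addr)
    {
        return region->iova + (host_addr - region_host_base);
    }

With it, the avail address computed in the patch is simply
region_to_iova(&driver_region, addr.desc_user_addr, addr.avail_user_addr),
i.e. driver_region.iova + addr.avail_user_addr - addr.desc_user_addr.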



