[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 10/18] vfio/common: Extract code from vfio_get_dirty_bitmap() to
From: Avihai Horon
Subject: [PATCH 10/18] vfio/common: Extract code from vfio_get_dirty_bitmap() to new function
Date: Thu, 26 Jan 2023 20:49:40 +0200
Extract the VFIO_IOMMU_DIRTY_PAGES ioctl code in vfio_get_dirty_bitmap()
to its own function.
This will help make the code more readable after the next patch adds
device dirty page bitmap sync functionality.
Signed-off-by: Avihai Horon <avihaih@nvidia.com>
---
hw/vfio/common.c | 53 ++++++++++++++++++++++++++++++------------------
1 file changed, 33 insertions(+), 20 deletions(-)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 005c060c67..3caa73d6f7 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -1584,26 +1584,13 @@ static void
vfio_listener_log_global_stop(MemoryListener *listener)
}
}
-static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
- uint64_t size, ram_addr_t ram_addr)
+static int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap,
+ hwaddr iova, hwaddr size)
{
struct vfio_iommu_type1_dirty_bitmap *dbitmap;
struct vfio_iommu_type1_dirty_bitmap_get *range;
- VFIOBitmap *vbmap;
int ret;
- if (!container->dirty_pages_supported) {
- cpu_physical_memory_set_dirty_range(ram_addr, size,
- tcg_enabled() ? DIRTY_CLIENTS_ALL :
- DIRTY_CLIENTS_NOCODE);
- return 0;
- }
-
- vbmap = vfio_bitmap_alloc(size);
- if (!vbmap) {
- return -errno;
- }
-
dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));
dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
@@ -1627,16 +1614,42 @@ static int vfio_get_dirty_bitmap(VFIOContainer
*container, uint64_t iova,
error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
" size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
(uint64_t)range->size, errno);
- goto err_out;
+ }
+
+ g_free(dbitmap);
+
+ return ret;
+}
+
+static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
+ uint64_t size, ram_addr_t ram_addr)
+{
+ VFIOBitmap *vbmap;
+ int ret;
+
+ if (!container->dirty_pages_supported) {
+ cpu_physical_memory_set_dirty_range(ram_addr, size,
+ tcg_enabled() ? DIRTY_CLIENTS_ALL :
+ DIRTY_CLIENTS_NOCODE);
+ return 0;
+ }
+
+ vbmap = vfio_bitmap_alloc(size);
+ if (!vbmap) {
+ return -errno;
+ }
+
+ ret = vfio_query_dirty_bitmap(container, vbmap, iova, size);
+ if (ret) {
+ goto out;
}
cpu_physical_memory_set_dirty_lebitmap(vbmap->bitmap, ram_addr,
vbmap->pages);
- trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size,
- range->bitmap.size, ram_addr);
-err_out:
- g_free(dbitmap);
+ trace_vfio_get_dirty_bitmap(container->fd, iova, size, vbmap->size,
+ ram_addr);
+out:
vfio_bitmap_dealloc(vbmap);
return ret;
--
2.26.3
- [PATCH 08/18] vfio/common: Record DMA mapped IOVA ranges, (continued)
- [PATCH 08/18] vfio/common: Record DMA mapped IOVA ranges, Avihai Horon, 2023/01/26
- [PATCH 09/18] vfio/common: Add device dirty page tracking start/stop, Avihai Horon, 2023/01/26
- [PATCH 06/18] util: Add iova_tree_nnodes(), Avihai Horon, 2023/01/26
- [PATCH 17/18] vfio/migration: Query device dirty page tracking support, Avihai Horon, 2023/01/26
- [PATCH 11/18] vfio/common: Add device dirty page bitmap sync, Avihai Horon, 2023/01/26
- [PATCH 13/18] memory/iommu: Add IOMMU_ATTR_MAX_IOVA attribute, Avihai Horon, 2023/01/26
- [PATCH 16/18] vfio/common: Optimize device dirty page tracking with vIOMMU, Avihai Horon, 2023/01/26
- [PATCH 12/18] vfio/common: Extract vIOMMU code from vfio_sync_dirty_bitmap(), Avihai Horon, 2023/01/26
- [PATCH 14/18] intel-iommu: Implement get_attr() method, Avihai Horon, 2023/01/26
- [PATCH 15/18] vfio/common: Support device dirty page tracking with vIOMMU, Avihai Horon, 2023/01/26
- [PATCH 10/18] vfio/common: Extract code from vfio_get_dirty_bitmap() to new function,
Avihai Horon <=
- [PATCH 18/18] docs/devel: Document VFIO device dirty page tracking, Avihai Horon, 2023/01/26