From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 4/7] kvm-all: make KVM's memory listener more generic
Date: Mon, 18 May 2015 17:28:37 +0200

No semantic change, but s->slots and s->migration_log move into a new
struct KVMMemoryListener.  KVM's memory listener becomes a member of
struct KVMState, with type KVMMemoryListener.
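
For context (not part of the patch itself): the change relies on the usual
embedded-struct pattern.  The generic MemoryListener is embedded in
KVMMemoryListener, so each callback, which only receives the MemoryListener
pointer, can recover the containing structure with container_of().  A minimal
sketch, mirroring the names used in the diff below:

    /* Illustrative sketch only -- simplified from the patch below. */
    typedef struct KVMMemoryListener {
        MemoryListener listener;   /* embedded generic listener */
        KVMSlot *slots;            /* per-listener slot array */
        int migration_log;
    } KVMMemoryListener;

    static void kvm_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
    {
        /* The callback only sees the embedded member; recover the
         * enclosing KVMMemoryListener from it. */
        KVMMemoryListener *kml = container_of(listener, KVMMemoryListener,
                                              listener);

        memory_region_ref(section->mr);
        kvm_set_phys_mem(kml, section, true);
    }

Because the slot array now lives in the listener rather than in KVMState, a
second KVMMemoryListener could in principle be registered for another address
space; this patch only moves the existing one.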

Signed-off-by: Paolo Bonzini <address@hidden>
---
 include/sysemu/kvm_int.h |   6 ++
 kvm-all.c                | 174 +++++++++++++++++++++++++++--------------------
 2 files changed, 106 insertions(+), 74 deletions(-)

diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
index e8dcbd7..f2698fa 100644
--- a/include/sysemu/kvm_int.h
+++ b/include/sysemu/kvm_int.h
@@ -22,6 +22,12 @@ typedef struct KVMSlot
     int flags;
 } KVMSlot;
 
+typedef struct KVMMemoryListener {
+    MemoryListener listener;
+    KVMSlot *slots;
+    int migration_log;
+} KVMMemoryListener;
+
 #define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
 
 #define KVM_STATE(obj) \
diff --git a/kvm-all.c b/kvm-all.c
index 4499ff8..2700182 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -62,7 +62,6 @@ struct KVMState
 {
     AccelState parent_obj;
 
-    KVMSlot *slots;
     int nr_slots;
     int fd;
     int vmfd;
@@ -70,7 +69,6 @@ struct KVMState
     struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
     bool coalesced_flush_in_progress;
     int broken_set_mem_region;
-    int migration_log;
     int vcpu_events;
     int robust_singlestep;
     int debugregs;
@@ -116,13 +114,14 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = {
     KVM_CAP_LAST_INFO
 };
 
-static KVMSlot *kvm_get_free_slot(KVMState *s)
+static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
 {
+    KVMState *s = kvm_state;
     int i;
 
     for (i = 0; i < s->nr_slots; i++) {
-        if (s->slots[i].memory_size == 0) {
-            return &s->slots[i];
+        if (kml->slots[i].memory_size == 0) {
+            return &kml->slots[i];
         }
     }
 
@@ -131,12 +130,14 @@ static KVMSlot *kvm_get_free_slot(KVMState *s)
 
 bool kvm_has_free_slot(MachineState *ms)
 {
-    return kvm_get_free_slot(KVM_STATE(ms->accelerator));
+    KVMState *s = KVM_STATE(ms->accelerator);
+
+    return kvm_get_free_slot(&s->memory_listener);
 }
 
-static KVMSlot *kvm_alloc_slot(KVMState *s)
+static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
 {
-    KVMSlot *slot = kvm_get_free_slot(s);
+    KVMSlot *slot = kvm_get_free_slot(kml);
 
     if (slot) {
         return slot;
@@ -146,14 +147,15 @@ static KVMSlot *kvm_alloc_slot(KVMState *s)
     abort();
 }
 
-static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
+static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                          hwaddr start_addr,
                                          hwaddr end_addr)
 {
+    KVMState *s = kvm_state;
     int i;
 
     for (i = 0; i < s->nr_slots; i++) {
-        KVMSlot *mem = &s->slots[i];
+        KVMSlot *mem = &kml->slots[i];
 
         if (start_addr == mem->start_addr &&
             end_addr == mem->start_addr + mem->memory_size) {
@@ -167,15 +169,16 @@ static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
 /*
  * Find overlapping slot with lowest start address
  */
-static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
+static KVMSlot *kvm_lookup_overlapping_slot(KVMMemoryListener *kml,
                                             hwaddr start_addr,
                                             hwaddr end_addr)
 {
+    KVMState *s = kvm_state;
     KVMSlot *found = NULL;
     int i;
 
     for (i = 0; i < s->nr_slots; i++) {
-        KVMSlot *mem = &s->slots[i];
+        KVMSlot *mem = &kml->slots[i];
 
         if (mem->memory_size == 0 ||
             (found && found->start_addr < mem->start_addr)) {
@@ -194,10 +197,11 @@ static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
 int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                        hwaddr *phys_addr)
 {
+    KVMMemoryListener *kml = &s->memory_listener;
     int i;
 
     for (i = 0; i < s->nr_slots; i++) {
-        KVMSlot *mem = &s->slots[i];
+        KVMSlot *mem = &kml->slots[i];
 
         if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
             *phys_addr = mem->start_addr + (ram - mem->ram);
@@ -208,15 +212,16 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
     return 0;
 }
 
-static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
+static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot)
 {
+    KVMState *s = kvm_state;
     struct kvm_userspace_memory_region mem;
 
     mem.slot = slot->slot;
     mem.guest_phys_addr = slot->start_addr;
     mem.userspace_addr = (unsigned long)slot->ram;
     mem.flags = slot->flags;
-    if (s->migration_log) {
+    if (kml->migration_log) {
         mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
     }
 
@@ -291,9 +296,9 @@ static int kvm_mem_flags(MemoryRegion *mr)
     return flags;
 }
 
-static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
+static int kvm_slot_dirty_pages_log_change(KVMMemoryListener *kml,
+                                           KVMSlot *mem, bool log_dirty)
 {
-    KVMState *s = kvm_state;
     int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
     int old_flags;
 
@@ -306,7 +311,7 @@ static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
     mem->flags = flags;
 
     /* If nothing changed effectively, no need to issue ioctl */
-    if (s->migration_log) {
+    if (kml->migration_log) {
         flags |= KVM_MEM_LOG_DIRTY_PAGES;
     }
 
@@ -314,14 +319,13 @@ static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
         return 0;
     }
 
-    return kvm_set_user_memory_region(s, mem);
+    return kvm_set_user_memory_region(kml, mem);
 }
 
-static int kvm_dirty_pages_log_change(hwaddr phys_addr,
+static int kvm_dirty_pages_log_change(KVMMemoryListener *kml, hwaddr phys_addr,
                                       ram_addr_t size, bool log_dirty)
 {
-    KVMState *s = kvm_state;
-    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
+    KVMSlot *mem = kvm_lookup_matching_slot(kml, phys_addr, phys_addr + size);
 
     if (mem == NULL)  {
         fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
@@ -329,15 +333,16 @@ static int kvm_dirty_pages_log_change(hwaddr phys_addr,
                 (hwaddr)(phys_addr + size - 1));
         return -EINVAL;
     }
-    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
+    return kvm_slot_dirty_pages_log_change(kml, mem, log_dirty);
 }
 
 static void kvm_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
 {
+    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
     int r;
 
-    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
+    r = kvm_dirty_pages_log_change(kml, section->offset_within_address_space,
                                    int128_get64(section->size), true);
     if (r < 0) {
         abort();
@@ -347,25 +352,26 @@ static void kvm_log_start(MemoryListener *listener,
 static void kvm_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
 {
+    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
     int r;
 
-    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
+    r = kvm_dirty_pages_log_change(kml, section->offset_within_address_space,
                                    int128_get64(section->size), false);
     if (r < 0) {
         abort();
     }
 }
 
-static int kvm_set_migration_log(bool enable)
+static int kvm_set_migration_log(KVMMemoryListener *kml, bool enable)
 {
     KVMState *s = kvm_state;
     KVMSlot *mem;
     int i, err;
 
-    s->migration_log = enable;
+    kml->migration_log = enable;
 
     for (i = 0; i < s->nr_slots; i++) {
-        mem = &s->slots[i];
+        mem = &kml->slots[i];
 
         if (!mem->memory_size) {
             continue;
@@ -373,7 +379,7 @@ static int kvm_set_migration_log(bool enable)
         if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
             continue;
         }
-        err = kvm_set_user_memory_region(s, mem);
+        err = kvm_set_user_memory_region(kml, mem);
         if (err) {
             return err;
         }
@@ -403,7 +409,8 @@ static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
  * @start_add: start of logged region.
  * @end_addr: end of logged region.
  */
-static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
+static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
+                                          MemoryRegionSection *section)
 {
     KVMState *s = kvm_state;
     unsigned long size, allocated_size = 0;
@@ -415,7 +422,7 @@ static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
 
     d.dirty_bitmap = NULL;
     while (start_addr < end_addr) {
-        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
+        mem = kvm_lookup_overlapping_slot(kml, start_addr, end_addr);
         if (mem == NULL) {
             break;
         }
@@ -646,7 +653,8 @@ kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
     return NULL;
 }
 
-static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
+static void kvm_set_phys_mem(KVMMemoryListener *kml,
+                             MemoryRegionSection *section, bool add)
 {
     KVMState *s = kvm_state;
     KVMSlot *mem, old;
@@ -686,7 +694,7 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
     ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;
 
     while (1) {
-        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
+        mem = kvm_lookup_overlapping_slot(kml, start_addr, start_addr + size);
         if (!mem) {
             break;
         }
@@ -696,19 +704,19 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
             (ram - start_addr == mem->ram - mem->start_addr)) {
             /* The new slot fits into the existing one and comes with
              * identical parameters - update flags and done. */
-            kvm_slot_dirty_pages_log_change(mem, memory_region_is_logging(mr));
+            kvm_slot_dirty_pages_log_change(kml, mem, memory_region_is_logging(mr));
             return;
         }
 
         old = *mem;
 
-        if ((mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || s->migration_log) {
-            kvm_physical_sync_dirty_bitmap(section);
+        if ((mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || kml->migration_log) {
+            kvm_physical_sync_dirty_bitmap(kml, section);
         }
 
         /* unregister the overlapping slot */
         mem->memory_size = 0;
-        err = kvm_set_user_memory_region(s, mem);
+        err = kvm_set_user_memory_region(kml, mem);
         if (err) {
             fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                     __func__, strerror(-err));
@@ -725,13 +733,13 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
          * - and actually require a recent KVM version. */
         if (s->broken_set_mem_region &&
             old.start_addr == start_addr && old.memory_size < size && add) {
-            mem = kvm_alloc_slot(s);
+            mem = kvm_alloc_slot(kml);
             mem->memory_size = old.memory_size;
             mem->start_addr = old.start_addr;
             mem->ram = old.ram;
             mem->flags = kvm_mem_flags(mr);
 
-            err = kvm_set_user_memory_region(s, mem);
+            err = kvm_set_user_memory_region(kml, mem);
             if (err) {
                 fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                         strerror(-err));
@@ -746,13 +754,13 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
 
         /* register prefix slot */
         if (old.start_addr < start_addr) {
-            mem = kvm_alloc_slot(s);
+            mem = kvm_alloc_slot(kml);
             mem->memory_size = start_addr - old.start_addr;
             mem->start_addr = old.start_addr;
             mem->ram = old.ram;
             mem->flags =  kvm_mem_flags(mr);
 
-            err = kvm_set_user_memory_region(s, mem);
+            err = kvm_set_user_memory_region(kml, mem);
             if (err) {
                 fprintf(stderr, "%s: error registering prefix slot: %s\n",
                         __func__, strerror(-err));
@@ -769,14 +777,14 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
         if (old.start_addr + old.memory_size > start_addr + size) {
             ram_addr_t size_delta;
 
-            mem = kvm_alloc_slot(s);
+            mem = kvm_alloc_slot(kml);
             mem->start_addr = start_addr + size;
             size_delta = mem->start_addr - old.start_addr;
             mem->memory_size = old.memory_size - size_delta;
             mem->ram = old.ram + size_delta;
             mem->flags = kvm_mem_flags(mr);
 
-            err = kvm_set_user_memory_region(s, mem);
+            err = kvm_set_user_memory_region(kml, mem);
             if (err) {
                 fprintf(stderr, "%s: error registering suffix slot: %s\n",
                         __func__, strerror(-err));
@@ -792,13 +800,13 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
     if (!add) {
         return;
     }
-    mem = kvm_alloc_slot(s);
+    mem = kvm_alloc_slot(kml);
     mem->memory_size = size;
     mem->start_addr = start_addr;
     mem->ram = ram;
     mem->flags = kvm_mem_flags(mr);
 
-    err = kvm_set_user_memory_region(s, mem);
+    err = kvm_set_user_memory_region(kml, mem);
     if (err) {
         fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                 strerror(-err));
@@ -809,23 +817,28 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
 static void kvm_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
 {
+    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
+
     memory_region_ref(section->mr);
-    kvm_set_phys_mem(section, true);
+    kvm_set_phys_mem(kml, section, true);
 }
 
 static void kvm_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
 {
-    kvm_set_phys_mem(section, false);
+    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
+
+    kvm_set_phys_mem(kml, section, false);
     memory_region_unref(section->mr);
 }
 
 static void kvm_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
 {
+    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
     int r;
 
-    r = kvm_physical_sync_dirty_bitmap(section);
+    r = kvm_physical_sync_dirty_bitmap(kml, section);
     if (r < 0) {
         abort();
     }
@@ -833,17 +846,19 @@ static void kvm_log_sync(MemoryListener *listener,
 
 static void kvm_log_global_start(struct MemoryListener *listener)
 {
+    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
     int r;
 
-    r = kvm_set_migration_log(1);
+    r = kvm_set_migration_log(kml, 1);
     assert(r >= 0);
 }
 
 static void kvm_log_global_stop(struct MemoryListener *listener)
 {
+    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
     int r;
 
-    r = kvm_set_migration_log(0);
+    r = kvm_set_migration_log(kml, 0);
     assert(r >= 0);
 }
 
@@ -916,20 +931,29 @@ static void kvm_io_ioeventfd_del(MemoryListener *listener,
     }
 }
 
-static MemoryListener kvm_memory_listener = {
-    .region_add = kvm_region_add,
-    .region_del = kvm_region_del,
-    .log_start = kvm_log_start,
-    .log_stop = kvm_log_stop,
-    .log_sync = kvm_log_sync,
-    .log_global_start = kvm_log_global_start,
-    .log_global_stop = kvm_log_global_stop,
-    .eventfd_add = kvm_mem_ioeventfd_add,
-    .eventfd_del = kvm_mem_ioeventfd_del,
-    .coalesced_mmio_add = kvm_coalesce_mmio_region,
-    .coalesced_mmio_del = kvm_uncoalesce_mmio_region,
-    .priority = 10,
-};
+static void kvm_memory_listener_register(KVMState *s,
+                                         KVMMemoryListener *kml,
+                                         AddressSpace *as)
+{
+    int i;
+
+    kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
+
+    for (i = 0; i < s->nr_slots; i++) {
+        kml->slots[i].slot = i;
+    }
+
+    kml->listener.region_add = kvm_region_add;
+    kml->listener.region_del = kvm_region_del;
+    kml->listener.log_start = kvm_log_start;
+    kml->listener.log_stop = kvm_log_stop;
+    kml->listener.log_sync = kvm_log_sync;
+    kml->listener.log_global_start = kvm_log_global_start;
+    kml->listener.log_global_stop = kvm_log_global_stop;
+    kml->listener.priority = 10;
+
+    memory_listener_register(&kml->listener, as);
+}
 
 static MemoryListener kvm_io_listener = {
     .eventfd_add = kvm_io_ioeventfd_add,
@@ -1437,7 +1461,7 @@ static int kvm_init(MachineState *ms)
     KVMState *s;
     const KVMCapabilityInfo *missing_cap;
     int ret;
-    int i, type = 0;
+    int type = 0;
     const char *kvm_type;
 
     s = KVM_STATE(ms->accelerator);
@@ -1486,12 +1510,6 @@ static int kvm_init(MachineState *ms)
         s->nr_slots = 32;
     }
 
-    s->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
-
-    for (i = 0; i < s->nr_slots; i++) {
-        s->slots[i].slot = i;
-    }
-
     /* check the vcpu limits */
     soft_vcpus_limit = kvm_recommended_vcpus(s);
     hard_vcpus_limit = kvm_max_vcpus(s);
@@ -1629,8 +1647,16 @@ static int kvm_init(MachineState *ms)
     }
 
     kvm_state = s;
-    memory_listener_register(&kvm_memory_listener, &address_space_memory);
-    memory_listener_register(&kvm_io_listener, &address_space_io);
+
+    s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
+    s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
+    s->memory_listener.listener.coalesced_mmio_add = kvm_coalesce_mmio_region;
+    s->memory_listener.listener.coalesced_mmio_del = kvm_uncoalesce_mmio_region;
+
+    kvm_memory_listener_register(s, &s->memory_listener,
+                                 &address_space_memory);
+    memory_listener_register(&kvm_io_listener,
+                             &address_space_io);
 
     s->many_ioeventfds = kvm_check_many_ioeventfds();
 
@@ -1646,7 +1672,7 @@ err:
     if (s->fd != -1) {
         close(s->fd);
     }
-    g_free(s->slots);
+    g_free(s->memory_listener.slots);
 
     return ret;
 }
-- 
1.8.3.1




