From: Hailiang Zhang
Subject: Re: [Qemu-devel] [PATCH COLO-Frame v11 32/39] COLO: Separate the process of saving/loading ram and device state
Date: Tue, 1 Dec 2015 20:07:47 +0800
User-agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Thunderbird/38.4.0

On 2015/11/27 13:10, Li Zhijian wrote:
On 11/24/2015 05:25 PM, zhanghailiang wrote:
We separate the process of saving/loading ram and device state when doing a
checkpoint, and add new helpers for saving/loading ram/device state. With this
change, we can transfer ram directly from master to slave without using a
QEMUSizedBuffer as an intermediary, which also reduces the amount of extra
memory used during a checkpoint.

Besides, we move colo_flush_ram_cache() to the proper position after the above
change.

Signed-off-by: zhanghailiang <address@hidden>
Signed-off-by: Li Zhijian <address@hidden>
---
v11:
- Remove load configuration section in qemu_loadvm_state_begin()
---
  include/sysemu/sysemu.h |   6 +++
  migration/colo.c        |  43 ++++++++++++----
  migration/ram.c         |   5 --
  migration/savevm.c      | 132 ++++++++++++++++++++++++++++++++++++++++++++++--
  4 files changed, 168 insertions(+), 18 deletions(-)
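
To make the new flow easier to follow before diving into the hunks: after this
patch the master-side checkpoint path looks roughly like the sketch below
(condensed from the colo.c changes; error handling, iothread locking and the
rest of the control-message handshake are elided):

    /* master side, once per checkpoint (simplified sketch) */
    colo_ctl_put(s->to_dst_file, COLO_COMMAND_VMSTATE_SEND, 0);
    qemu_savevm_state_begin(s->to_dst_file, &s->params); /* ram setup, straight to the socket */
    qemu_save_device_state(trans);         /* device state into the QEMUSizedBuffer */
    qemu_save_ram_precopy(s->to_dst_file); /* ram pages, straight to the socket */
    colo_ctl_put(s->to_dst_file, COLO_COMMAND_VMSTATE_SIZE, qsb_get_length(buffer));
    /* ... then the buffered device state is sent, as before ... */

Only the device state still goes through the buffer; ram bypasses it entirely.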

diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index 91eeda3..5deae53 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -133,7 +133,13 @@ void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f, const char *name,
                                             uint64_t *start_list,
                                             uint64_t *length_list);
+int qemu_save_ram_precopy(QEMUFile *f);
+int qemu_save_device_state(QEMUFile *f);
+
  int qemu_loadvm_state(QEMUFile *f);
+int qemu_loadvm_state_begin(QEMUFile *f);
+int qemu_load_ram_state(QEMUFile *f);
+int qemu_load_device_state(QEMUFile *f);
  typedef enum DisplayType
  {
diff --git a/migration/colo.c b/migration/colo.c
index 3866e86..f7f349b 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -247,21 +247,32 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
          goto out;
      }
+    ret = colo_ctl_put(s->to_dst_file, COLO_COMMAND_VMSTATE_SEND, 0);
+    if (ret < 0) {
+        goto out;
+    }
      /* Disable block migration */
      s->params.blk = 0;
      s->params.shared = 0;
-    qemu_savevm_state_header(trans);
-    qemu_savevm_state_begin(trans, &s->params);
-    qemu_mutex_lock_iothread();
-    qemu_savevm_state_complete_precopy(trans, false);
-    qemu_mutex_unlock_iothread();
-
-    qemu_fflush(trans);
+    qemu_savevm_state_begin(s->to_dst_file, &s->params);
+    ret = qemu_file_get_error(s->to_dst_file);
+    if (ret < 0) {
+        error_report("save vm state begin error\n");
+        goto out;
+    }
-    ret = colo_ctl_put(s->to_dst_file, COLO_COMMAND_VMSTATE_SEND, 0);
+    qemu_mutex_lock_iothread();
+    /* Note: device state is saved into buffer */
+    ret = qemu_save_device_state(trans);
      if (ret < 0) {
+        error_report("save device state error\n");
+        qemu_mutex_unlock_iothread();
          goto out;
      }
+    qemu_fflush(trans);
+    qemu_save_ram_precopy(s->to_dst_file);
+    qemu_mutex_unlock_iothread();
+
      /* we send the total size of the vmstate first */
      size = qsb_get_length(buffer);
      ret = colo_ctl_put(s->to_dst_file, COLO_COMMAND_VMSTATE_SIZE, size);
@@ -548,6 +559,16 @@ void *colo_process_incoming_thread(void *opaque)
              goto out;
          }
+        ret = qemu_loadvm_state_begin(mis->from_src_file);
+        if (ret < 0) {
+            error_report("load vm state begin error, ret=%d", ret);
+            goto out;
+        }
+        ret = qemu_load_ram_state(mis->from_src_file);
+        if (ret < 0) {
+            error_report("load ram state error");
+            goto out;
+        }
          /* read the VM state total size first */
          ret = colo_ctl_get(mis->from_src_file,
                             COLO_COMMAND_VMSTATE_SIZE, &value);
@@ -580,8 +601,10 @@ void *colo_process_incoming_thread(void *opaque)
          qemu_mutex_lock_iothread();
          qemu_system_reset(VMRESET_SILENT);
          vmstate_loading = true;
-        if (qemu_loadvm_state(fb) < 0) {
-            error_report("COLO: loadvm failed");
+        colo_flush_ram_cache();
+        ret = qemu_load_device_state(fb);
+        if (ret < 0) {
+            error_report("COLO: load device state failed\n");
              vmstate_loading = false;
              qemu_mutex_unlock_iothread();
              goto out;
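
The incoming side mirrors this ordering; again a condensed sketch, based on the
colo_process_incoming_thread() hunks above (control messages and error paths
omitted):

    /* slave side, once per checkpoint (simplified sketch) */
    qemu_loadvm_state_begin(mis->from_src_file); /* SECTION_START part, e.g. ram setup */
    qemu_load_ram_state(mis->from_src_file);     /* ram pages, into the colo cache */
    /* receive COLO_COMMAND_VMSTATE_SIZE and read the device state into fb */
    qemu_system_reset(VMRESET_SILENT);
    colo_flush_ram_cache();     /* flush cached pages into the guest's ram */
    qemu_load_device_state(fb); /* device state, from the buffer */

Flushing the ram cache right before loading the device state is what allows
colo_flush_ram_cache() to move out of ram_load(), as the ram.c hunk below
shows.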
diff --git a/migration/ram.c b/migration/ram.c
index 4f37144..06a738b 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2448,7 +2448,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
       * be atomic
       */
      bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;
-    bool need_flush = false;
      seq_iter++;
@@ -2483,7 +2482,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
              /* After going into COLO, we should load the Page into colo_cache */
              if (ram_cache_enable) {
                  host = colo_cache_from_block_offset(block, addr);
-                need_flush = true;
              } else {
                  host = host_from_ram_block_offset(block, addr);
              }
@@ -2578,9 +2576,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
      rcu_read_unlock();
-    if (!ret  && ram_cache_enable && need_flush) {
-        colo_flush_ram_cache();
-    }
      DPRINTF("Completed load of VM with exit code %d seq iteration "
              "%" PRIu64 "\n", ret, seq_iter);
      return ret;
diff --git a/migration/savevm.c b/migration/savevm.c
index c7c26d8..949caf0 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -50,6 +50,7 @@
  #include "qemu/iov.h"
  #include "block/snapshot.h"
  #include "block/qapi.h"
+#include "migration/colo.h"
  #ifndef ETH_P_RARP
@@ -923,6 +924,10 @@ void qemu_savevm_state_begin(QEMUFile *f,
              break;
          }
      }
+    if (migration_in_colo_state()) {
+        qemu_put_byte(f, QEMU_VM_EOF);
+        qemu_fflush(f);
+    }
  }
  /*
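
The QEMU_VM_EOF appended here when in COLO state makes each checkpoint's begin
stage a self-delimiting chunk on the wire: qemu_loadvm_state_begin() on the
incoming side (added below) keeps reading QEMU_VM_SECTION_START sections until
it sees exactly this byte.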
@@ -1192,13 +1197,44 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
      return ret;
  }
-static int qemu_save_device_state(QEMUFile *f)
+int qemu_save_ram_precopy(QEMUFile *f)
  {
      SaveStateEntry *se;
+    int ret = 0;
-    qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
-    qemu_put_be32(f, QEMU_VM_FILE_VERSION);
+    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+        if (!se->ops || !se->ops->save_live_complete_precopy) {
+            continue;
+        }
+        if (se->ops && se->ops->is_active) {
+            if (!se->ops->is_active(se->opaque)) {
+                continue;
+            }
+        }
+        trace_savevm_section_start(se->idstr, se->section_id);
+        save_section_header(f, se, QEMU_VM_SECTION_END);
+
+        ret = se->ops->save_live_complete_precopy(f, se->opaque);
+        trace_savevm_section_end(se->idstr, se->section_id, ret);
+        save_section_footer(f, se);
+        if (ret < 0) {
+            qemu_file_set_error(f, ret);
+            return ret;
+        }
+    }
+    qemu_put_byte(f, QEMU_VM_EOF);
+
+    return 0;
+}
+
+int qemu_save_device_state(QEMUFile *f)
+{
+    SaveStateEntry *se;
+
+    if (!migration_in_colo_state()) {
+        qemu_savevm_state_header(f);
+    }
      cpu_synchronize_all_states();
      QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
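
Note how the framing of the two new save helpers pairs with the new load
helpers further down: qemu_save_ram_precopy() emits its sections with the
QEMU_VM_SECTION_END type and terminates with QEMU_VM_EOF, matching what
qemu_load_ram_state() accepts (QEMU_VM_SECTION_PART or QEMU_VM_SECTION_END up
to an EOF byte), while qemu_save_device_state() produces the
QEMU_VM_SECTION_FULL sections that qemu_load_device_state() expects.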
@@ -1938,6 +1974,96 @@ int qemu_loadvm_state(QEMUFile *f)
      return ret;
  }
+int qemu_loadvm_state_begin(QEMUFile *f)
+{
+    uint8_t section_type;
+    int ret = -1;
+    MigrationIncomingState *mis = migration_incoming_get_current();
+
+    if (!mis) {
+        error_report("qemu_loadvm_state_begin");
+        return -EINVAL;
+    }
+    /* CleanUp */
+    loadvm_free_handlers(mis);
+
+    if (qemu_savevm_state_blocked(NULL)) {
+        return -EINVAL;
+    }
+
+    while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
+        if (section_type != QEMU_VM_SECTION_START) {
+            error_report("QEMU_VM_SECTION_START");
+            ret = -EINVAL;
+            goto out;
+        }
+        ret = qemu_loadvm_section_start_full(f, mis);
+        if (ret < 0) {
+            goto out;
+        }
+    }
+    ret = qemu_file_get_error(f);
+    if (ret == 0) {
+        return 0;
+     }
+out:
+    return ret;
+}
+
+int qemu_load_ram_state(QEMUFile *f)
+{
+    uint8_t section_type;
+    MigrationIncomingState *mis = migration_incoming_get_current();
+    int ret = -1;
+
+    while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
+        if (section_type != QEMU_VM_SECTION_PART &&
+            section_type != QEMU_VM_SECTION_END) {
+            error_report("load ram state error: expected "
+                         "QEMU_VM_SECTION_PART or QEMU_VM_SECTION_END");
+            return -EINVAL;
+        }
+        ret = qemu_loadvm_section_part_end(f, mis);
+        if (ret < 0) {
+            goto out;
+        }
+    }
+    ret = qemu_file_get_error(f);
+    if (ret == 0) {
+        return 0;
+     }
+out:
+    return ret;
+}
+
+int qemu_load_device_state(QEMUFile *f)
+{
+    uint8_t section_type;
+    MigrationIncomingState *mis = migration_incoming_get_current();
+    int ret = -1;
+
+    while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
+        if (section_type != QEMU_VM_SECTION_FULL) {
+            error_report("load device state error: "
+                         "Not get QEMU_VM_SECTION_FULL");
+            return -EINVAL;
+        }
+         ret = qemu_loadvm_section_start_full(f, mis);
+         if (ret < 0) {
+            goto out;
+         }

Coding style: the above 4 lines are not aligned.


+    }
+
+     ret = qemu_file_get_error(f);
+
+    cpu_synchronize_all_post_init();
+     if (ret == 0) {
+        return 0;
+     }

Same alignment issue here.


I will fix them all in the next version, thanks.


thanks
Li

+out:
+    return ret;
+}
+
  void hmp_savevm(Monitor *mon, const QDict *qdict)
  {
      BlockDriverState *bs, *bs1;



