

From: Hailiang Zhang
Subject: Re: [Qemu-devel] [PATCH COLO-Frame v12 32/38] COLO: Split qemu_savevm_state_begin out of checkpoint process
Date: Mon, 28 Dec 2015 15:29:30 +0800
User-agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Thunderbird/38.4.0

On 2015/12/18 20:01, Dr. David Alan Gilbert wrote:
* zhanghailiang (address@hidden) wrote:
It is unnecessary to call qemu_savevm_state_begin() in every checkpoint process.
It mainly sets up devices and does the first device state pass. This data does
not change during later checkpoints, so we split the call out of
colo_do_checkpoint_transaction(); this way we avoid re-transferring that data
in every subsequent checkpoint.

Signed-off-by: zhanghailiang <address@hidden>
Signed-off-by: Li Zhijian <address@hidden>
---
  migration/colo.c | 51 +++++++++++++++++++++++++++++++++++++--------------
  1 file changed, 37 insertions(+), 14 deletions(-)

diff --git a/migration/colo.c b/migration/colo.c
index d253d64..4571359 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -276,15 +276,6 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
      if (ret < 0) {
          goto out;
      }
-    /* Disable block migration */
-    s->params.blk = 0;
-    s->params.shared = 0;
-    qemu_savevm_state_begin(s->to_dst_file, &s->params);
-    ret = qemu_file_get_error(s->to_dst_file);
-    if (ret < 0) {
-        error_report("save vm state begin error\n");
-        goto out;
-    }

      qemu_mutex_lock_iothread();
      /* Note: device state is saved into buffer */
@@ -348,6 +339,21 @@ out:
      return ret;
  }

+static int colo_prepare_before_save(MigrationState *s)
+{
+    int ret;
+    /* Disable block migration */
+    s->params.blk = 0;
+    s->params.shared = 0;
+    qemu_savevm_state_begin(s->to_dst_file, &s->params);
+    ret = qemu_file_get_error(s->to_dst_file);
+    if (ret < 0) {
+        error_report("save vm state begin error\n");

  '\n' again not needed.

+        return ret;
+    }
+    return 0;
+}
+
  static void colo_process_checkpoint(MigrationState *s)
  {
      QEMUSizedBuffer *buffer = NULL;
@@ -363,6 +369,11 @@ static void colo_process_checkpoint(MigrationState *s)
          goto out;
      }

+    ret = colo_prepare_before_save(s);
+    if (ret < 0) {
+        goto out;
+    }
+
      /*
       * Wait for Secondary finish loading vm states and enter COLO
       * restore.
@@ -484,6 +495,18 @@ static int colo_wait_handle_cmd(QEMUFile *f, int *checkpoint_request)
      }
  }

+static int colo_prepare_before_load(QEMUFile *f)
+{
+    int ret;
+
+    ret = qemu_loadvm_state_begin(f);
+    if (ret < 0) {
+        error_report("load vm state begin error, ret=%d", ret);
+        return ret;

You can simplify these returns; remove this line.

+    }
+    return 0;

and make this return ret; same in a few places.


Other than those minor issues;


I will fix them all in the next version; a sketch of the simplified form is appended at the end of this mail.

Reviewed-by: Dr. David Alan Gilbert <address@hidden>


Thanks.
Hailiang


+}
+
  void *colo_process_incoming_thread(void *opaque)
  {
      MigrationIncomingState *mis = opaque;
@@ -522,6 +545,11 @@ void *colo_process_incoming_thread(void *opaque)
          goto out;
      }

+    ret = colo_prepare_before_load(mis->from_src_file);
+    if (ret < 0) {
+        goto out;
+    }
+
      ret = colo_put_cmd(mis->to_src_file, COLO_COMMAND_CHECKPOINT_READY);
      if (ret < 0) {
          goto out;
@@ -556,11 +584,6 @@ void *colo_process_incoming_thread(void *opaque)
              goto out;
          }

-        ret = qemu_loadvm_state_begin(mis->from_src_file);
-        if (ret < 0) {
-            error_report("load vm state begin error, ret=%d", ret);
-            goto out;
-        }
          ret = qemu_load_ram_state(mis->from_src_file);
          if (ret < 0) {
              error_report("load ram state error");
--
1.8.3.1


--
Dr. David Alan Gilbert / address@hidden / Manchester, UK
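
For reference, the simplified error handling discussed above could look roughly
like the following. This is only an untested sketch of the shape the review
suggests (drop the trailing '\n' from error_report() and return ret directly
instead of a separate return 0 path), built from the calls already present in
the patch; it is not code from the posted series:

static int colo_prepare_before_save(MigrationState *s)
{
    int ret;

    /* Disable block migration */
    s->params.blk = 0;
    s->params.shared = 0;
    qemu_savevm_state_begin(s->to_dst_file, &s->params);
    ret = qemu_file_get_error(s->to_dst_file);
    if (ret < 0) {
        /* error_report() already appends a newline */
        error_report("save vm state begin error");
    }
    return ret;
}

static int colo_prepare_before_load(QEMUFile *f)
{
    int ret;

    ret = qemu_loadvm_state_begin(f);
    if (ret < 0) {
        error_report("load vm state begin error, ret=%d", ret);
    }
    return ret;
}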






