[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH RESEND V3 11/16] savevm: split the process of differ
From: |
Zhang Chen |
Subject: |
[Qemu-devel] [PATCH RESEND V3 11/16] savevm: split the process of different stages for loadvm/savevm |
Date: |
Thu, 4 Jan 2018 14:01:10 +0800 |
From: zhanghailiang <address@hidden>
There are several stages during the loadvm/savevm process. In different stages,
the migration incoming process handles different types of sections.
We want to control these stages more accurately; it will benefit COLO
performance, since we don't have to save QEMU_VM_SECTION_START type
sections every time while doing a checkpoint. Besides, we want to separate
the process of saving/loading memory and device state.
So we add two new helper functions: qemu_load_device_state() and
qemu_savevm_live_state() to achieve different processes during migration.
Besides, we make qemu_loadvm_state_main() and qemu_save_device_state()
public, and simplify the code of qemu_save_device_state() by calling the
wrapper qemu_savevm_state_header().
Cc: Juan Quintela <address@hidden>
Signed-off-by: zhanghailiang <address@hidden>
Signed-off-by: Li Zhijian <address@hidden>
Signed-off-by: Zhang Chen <address@hidden>
Reviewed-by: Dr. David Alan Gilbert <address@hidden>
---
migration/colo.c | 37 +++++++++++++++++++++++++++++--------
migration/savevm.c | 35 ++++++++++++++++++++++++++++-------
migration/savevm.h | 4 ++++
3 files changed, 61 insertions(+), 15 deletions(-)
diff --git a/migration/colo.c b/migration/colo.c
index 790b122..a931ff2 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -29,6 +29,7 @@
#include "qapi-event.h"
#include "block/block.h"
#include "replication.h"
+#include "sysemu/cpus.h"
static bool vmstate_loading;
static Notifier packets_compare_notifier;
@@ -380,24 +381,31 @@ static int colo_do_checkpoint_transaction(MigrationState
*s,
/* Disable block migration */
migrate_set_block_enabled(false, &local_err);
- qemu_savevm_state_header(fb);
- qemu_savevm_state_setup(fb);
qemu_mutex_lock_iothread();
replication_do_checkpoint_all(&local_err);
if (local_err) {
qemu_mutex_unlock_iothread();
goto out;
}
- qemu_savevm_state_complete_precopy(fb, false, false);
- qemu_mutex_unlock_iothread();
-
- qemu_fflush(fb);
colo_send_message(s->to_dst_file, COLO_MESSAGE_VMSTATE_SEND, &local_err);
if (local_err) {
goto out;
}
/*
+ * Only save VM's live state, which not including device state.
+ * TODO: We may need a timeout mechanism to prevent COLO process
+ * to be blocked here.
+ */
+ qemu_savevm_live_state(s->to_dst_file);
+ /* Note: device state is saved into buffer */
+ ret = qemu_save_device_state(fb);
+
+ qemu_mutex_unlock_iothread();
+
+ qemu_fflush(fb);
+
+ /*
* We need the size of the VMstate data in Secondary side,
* With which we can decide how much data should be read.
*/
@@ -610,6 +618,7 @@ void *colo_process_incoming_thread(void *opaque)
uint64_t total_size;
uint64_t value;
Error *local_err = NULL;
+ int ret;
qemu_sem_init(&mis->colo_incoming_sem, 0);
@@ -682,6 +691,16 @@ void *colo_process_incoming_thread(void *opaque)
goto out;
}
+ qemu_mutex_lock_iothread();
+ cpu_synchronize_all_pre_loadvm();
+ ret = qemu_loadvm_state_main(mis->from_src_file, mis);
+ qemu_mutex_unlock_iothread();
+
+ if (ret < 0) {
+ error_report("Load VM's live state (ram) error");
+ goto out;
+ }
+
value = colo_receive_message_value(mis->from_src_file,
COLO_MESSAGE_VMSTATE_SIZE, &local_err);
if (local_err) {
@@ -715,8 +734,9 @@ void *colo_process_incoming_thread(void *opaque)
qemu_mutex_lock_iothread();
qemu_system_reset(SHUTDOWN_CAUSE_NONE);
vmstate_loading = true;
- if (qemu_loadvm_state(fb) < 0) {
- error_report("COLO: loadvm failed");
+ ret = qemu_load_device_state(fb);
+ if (ret < 0) {
+ error_report("COLO: load device state failed");
qemu_mutex_unlock_iothread();
goto out;
}
@@ -777,6 +797,7 @@ out:
if (mis->to_src_file) {
qemu_fclose(mis->to_src_file);
}
+ qemu_loadvm_state_cleanup();
migration_incoming_disable_colo();
return NULL;
diff --git a/migration/savevm.c b/migration/savevm.c
index c582716..30a3c77 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1317,13 +1317,20 @@ done:
return ret;
}
-static int qemu_save_device_state(QEMUFile *f)
+void qemu_savevm_live_state(QEMUFile *f)
{
- SaveStateEntry *se;
+ /* save QEMU_VM_SECTION_END section */
+ qemu_savevm_state_complete_precopy(f, true, false);
+ qemu_put_byte(f, QEMU_VM_EOF);
+}
- qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
- qemu_put_be32(f, QEMU_VM_FILE_VERSION);
+int qemu_save_device_state(QEMUFile *f)
+{
+ SaveStateEntry *se;
+ if (!migration_in_colo_state()) {
+ qemu_savevm_state_header(f);
+ }
cpu_synchronize_all_states();
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
@@ -1379,8 +1386,6 @@ enum LoadVMExitCodes {
LOADVM_QUIT = 1,
};
-static int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis);
-
/* ------ incoming postcopy messages ------ */
/* 'advise' arrives before any transfers just to tell us that a postcopy
* *might* happen - it might be skipped if precopy transferred everything
@@ -2003,7 +2008,7 @@ void qemu_loadvm_state_cleanup(void)
}
}
-static int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis)
+int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis)
{
uint8_t section_type;
int ret = 0;
@@ -2148,6 +2153,22 @@ int qemu_loadvm_state(QEMUFile *f)
return ret;
}
+int qemu_load_device_state(QEMUFile *f)
+{
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ int ret;
+
+ /* Load QEMU_VM_SECTION_FULL section */
+ ret = qemu_loadvm_state_main(f, mis);
+ if (ret < 0) {
+ error_report("Failed to load device state: %d", ret);
+ return ret;
+ }
+
+ cpu_synchronize_all_post_init();
+ return 0;
+}
+
int save_snapshot(const char *name, Error **errp)
{
BlockDriverState *bs, *bs1;
diff --git a/migration/savevm.h b/migration/savevm.h
index 041d23c..8d463fd 100644
--- a/migration/savevm.h
+++ b/migration/savevm.h
@@ -52,8 +52,12 @@ void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f,
const char *name,
uint64_t *start_list,
uint64_t *length_list);
void qemu_savevm_send_colo_enable(QEMUFile *f);
+void qemu_savevm_live_state(QEMUFile *f);
+int qemu_save_device_state(QEMUFile *f);
int qemu_loadvm_state(QEMUFile *f);
void qemu_loadvm_state_cleanup(void);
+int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis);
+int qemu_load_device_state(QEMUFile *f);
#endif
--
2.7.4
- [Qemu-devel] [PATCH RESEND V3 03/16] colo-compare: use notifier to notify packets comparing result, (continued)
- [Qemu-devel] [PATCH RESEND V3 03/16] colo-compare: use notifier to notify packets comparing result, Zhang Chen, 2018/01/04
- [Qemu-devel] [PATCH RESEND V3 04/16] COLO: integrate colo compare with colo frame, Zhang Chen, 2018/01/04
- [Qemu-devel] [PATCH RESEND V3 05/16] COLO: Add block replication into colo process, Zhang Chen, 2018/01/04
- [Qemu-devel] [PATCH RESEND V3 06/16] COLO: Remove colo_state migration struct, Zhang Chen, 2018/01/04
- [Qemu-devel] [PATCH RESEND V3 08/16] ram/COLO: Record the dirty pages that SVM received, Zhang Chen, 2018/01/04
- [Qemu-devel] [PATCH RESEND V3 07/16] COLO: Load dirty pages into SVM's RAM cache firstly, Zhang Chen, 2018/01/04
- [Qemu-devel] [PATCH RESEND V3 09/16] COLO: Flush memory data from ram cache, Zhang Chen, 2018/01/04
- [Qemu-devel] [PATCH RESEND V3 10/16] qmp event: Add COLO_EXIT event to notify users while exited COLO, Zhang Chen, 2018/01/04
- [Qemu-devel] [PATCH RESEND V3 11/16] savevm: split the process of different stages for loadvm/savevm,
Zhang Chen <=
- [Qemu-devel] [PATCH RESEND V3 12/16] COLO: flush host dirty ram from cache, Zhang Chen, 2018/01/04
- [Qemu-devel] [PATCH RESEND V3 13/16] filter: Add handle_event method for NetFilterClass, Zhang Chen, 2018/01/04
- [Qemu-devel] [PATCH RESEND V3 14/16] filter-rewriter: handle checkpoint and failover event, Zhang Chen, 2018/01/04
- [Qemu-devel] [PATCH RESEND V3 15/16] COLO: notify net filters about checkpoint/failover event, Zhang Chen, 2018/01/04
- [Qemu-devel] [PATCH RESEND V3 16/16] COLO: quick failover process by kick COLO thread, Zhang Chen, 2018/01/04
- Re: [Qemu-devel] [PATCH RESEND V3 00/16] COLO: integrate colo frame with block replication and COLO proxy, no-reply, 2018/01/04