[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH COLO-Frame v10 30/38] COLO: Separate the process of
From: |
zhanghailiang |
Subject: |
[Qemu-devel] [PATCH COLO-Frame v10 30/38] COLO: Separate the process of saving/loading ram and device state |
Date: |
Tue, 3 Nov 2015 19:56:48 +0800 |
We separate the process of saving/loading ram and device state when doing a
checkpoint, and add new helpers for saving/loading ram/device state. With this
change, we can directly transfer ram from master to slave without using
QEMUSizedBuffer as an intermediary, which also reduces the amount of extra
memory used during checkpoint.
Besides, we move colo_flush_ram_cache to the proper position after the
above change.
Signed-off-by: zhanghailiang <address@hidden>
Signed-off-by: Li Zhijian <address@hidden>
---
include/sysemu/sysemu.h | 5 ++
migration/colo.c | 43 +++++++++++----
migration/ram.c | 8 ---
migration/savevm.c | 142 +++++++++++++++++++++++++++++++++++++++++++++++-
4 files changed, 177 insertions(+), 21 deletions(-)
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index 7297678..af1e1c7 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -94,7 +94,12 @@ int qemu_savevm_state_iterate(QEMUFile *f);
void qemu_savevm_state_complete(QEMUFile *f);
void qemu_savevm_state_cancel(void);
uint64_t qemu_savevm_state_pending(QEMUFile *f, uint64_t max_size);
+int qemu_save_ram_state(QEMUFile *f);
+int qemu_save_device_state(QEMUFile *f);
int qemu_loadvm_state(QEMUFile *f);
+int qemu_loadvm_state_begin(QEMUFile *f);
+int qemu_load_ram_state(QEMUFile *f);
+int qemu_load_device_state(QEMUFile *f);
typedef enum DisplayType
{
diff --git a/migration/colo.c b/migration/colo.c
index 8a3cc1c..21cef34 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -250,21 +250,32 @@ static int colo_do_checkpoint_transaction(MigrationState
*s,
goto out;
}
+ ret = colo_ctl_put(s->to_dst_file, COLO_COMMAND_VMSTATE_SEND, 0);
+ if (ret < 0) {
+ goto out;
+ }
/* Disable block migration */
s->params.blk = 0;
s->params.shared = 0;
- qemu_savevm_state_header(trans);
- qemu_savevm_state_begin(trans, &s->params);
- qemu_mutex_lock_iothread();
- qemu_savevm_state_complete(trans);
- qemu_mutex_unlock_iothread();
-
- qemu_fflush(trans);
+ qemu_savevm_state_begin(s->to_dst_file, &s->params);
+ ret = qemu_file_get_error(s->to_dst_file);
+ if (ret < 0) {
+ error_report("save vm state begin error\n");
+ goto out;
+ }
- ret = colo_ctl_put(s->to_dst_file, COLO_COMMAND_VMSTATE_SEND, 0);
+ qemu_mutex_lock_iothread();
+ /* Note: device state is saved into buffer */
+ ret = qemu_save_device_state(trans);
if (ret < 0) {
+ error_report("save device state error\n");
+ qemu_mutex_unlock_iothread();
goto out;
}
+ qemu_fflush(trans);
+ qemu_save_ram_state(s->to_dst_file);
+ qemu_mutex_unlock_iothread();
+
/* we send the total size of the vmstate first */
size = qsb_get_length(buffer);
ret = colo_ctl_put(s->to_dst_file, COLO_COMMAND_VMSTATE_SIZE, size);
@@ -544,6 +555,16 @@ void *colo_process_incoming_thread(void *opaque)
goto out;
}
+ ret = qemu_loadvm_state_begin(mis->from_src_file);
+ if (ret < 0) {
+ error_report("load vm state begin error, ret=%d", ret);
+ goto out;
+ }
+ ret = qemu_load_ram_state(mis->from_src_file);
+ if (ret < 0) {
+ error_report("load ram state error");
+ goto out;
+ }
/* read the VM state total size first */
total_size = colo_ctl_get(mis->from_src_file,
COLO_COMMAND_VMSTATE_SIZE);
@@ -573,8 +594,10 @@ void *colo_process_incoming_thread(void *opaque)
qemu_mutex_lock_iothread();
qemu_system_reset(VMRESET_SILENT);
vmstate_loading = true;
- if (qemu_loadvm_state(fb) < 0) {
- error_report("COLO: loadvm failed");
+ colo_flush_ram_cache();
+ ret = qemu_load_device_state(fb);
+ if (ret < 0) {
+ error_report("COLO: load device state failed\n");
vmstate_loading = false;
qemu_mutex_unlock_iothread();
goto out;
diff --git a/migration/ram.c b/migration/ram.c
index 8de5a5f..94bb47b 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1601,7 +1601,6 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
int flags = 0, ret = 0;
static uint64_t seq_iter;
int len = 0;
- bool need_flush = false;
seq_iter++;
@@ -1671,7 +1670,6 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
break;
}
- need_flush = true;
ch = qemu_get_byte(f);
ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
break;
@@ -1683,7 +1681,6 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
break;
}
- need_flush = true;
qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
break;
case RAM_SAVE_FLAG_COMPRESS_PAGE:
@@ -1716,7 +1713,6 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
ret = -EINVAL;
break;
}
- need_flush = true;
break;
case RAM_SAVE_FLAG_EOS:
/* normal exit */
@@ -1737,10 +1733,6 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
rcu_read_unlock();
- if (!ret && ram_cache_enable && need_flush) {
- DPRINTF("Flush ram_cache\n");
- colo_flush_ram_cache();
- }
DPRINTF("Completed load of VM with exit code %d seq iteration "
"%" PRIu64 "\n", ret, seq_iter);
return ret;
diff --git a/migration/savevm.c b/migration/savevm.c
index 1296cc3..8dc4b64 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -752,6 +752,10 @@ void qemu_savevm_state_begin(QEMUFile *f,
break;
}
}
+ if (migration_in_colo_state()) {
+ qemu_put_byte(f, QEMU_VM_EOF);
+ qemu_fflush(f);
+ }
}
/*
@@ -949,13 +953,44 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
return ret;
}
-static int qemu_save_device_state(QEMUFile *f)
+int qemu_save_ram_state(QEMUFile *f)
{
SaveStateEntry *se;
+ int ret = 0;
- qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
- qemu_put_be32(f, QEMU_VM_FILE_VERSION);
+ QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+ if (!se->ops || !se->ops->save_live_complete) {
+ continue;
+ }
+ if (se->ops && se->ops->is_active) {
+ if (!se->ops->is_active(se->opaque)) {
+ continue;
+ }
+ }
+ trace_savevm_section_start(se->idstr, se->section_id);
+
+ save_section_header(f, se, QEMU_VM_SECTION_END);
+
+ ret = se->ops->save_live_complete(f, se->opaque);
+ trace_savevm_section_end(se->idstr, se->section_id, ret);
+ save_section_footer(f, se);
+ if (ret < 0) {
+ qemu_file_set_error(f, ret);
+ return ret;
+ }
+ }
+ qemu_put_byte(f, QEMU_VM_EOF);
+ return 0;
+}
+
+int qemu_save_device_state(QEMUFile *f)
+{
+ SaveStateEntry *se;
+
+ if (!migration_in_colo_state()) {
+ qemu_savevm_state_header(f);
+ }
cpu_synchronize_all_states();
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
@@ -1264,6 +1299,107 @@ out:
return ret;
}
+int qemu_loadvm_state_begin(QEMUFile *f)
+{
+ uint8_t section_type;
+ int ret = -1;
+ MigrationIncomingState *mis = migration_incoming_get_current();
+
+ if (!mis) {
+ error_report("qemu_loadvm_state_begin");
+ return -EINVAL;
+ }
+ /* CleanUp */
+ loadvm_free_handlers(mis);
+
+ if (qemu_savevm_state_blocked(NULL)) {
+ return -EINVAL;
+ }
+
+ if (!savevm_state.skip_configuration) {
+ if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
+ error_report("Configuration section missing");
+ return -EINVAL;
+ }
+ ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0);
+
+ if (ret) {
+ return ret;
+ }
+ }
+
+ while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
+ if (section_type != QEMU_VM_SECTION_START) {
+ error_report("QEMU_VM_SECTION_START");
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = qemu_loadvm_section_start_full(f, mis);
+ if (ret < 0) {
+ goto out;
+ }
+ }
+ ret = qemu_file_get_error(f);
+ if (ret == 0) {
+ return 0;
+ }
+out:
+ return ret;
+}
+
+int qemu_load_ram_state(QEMUFile *f)
+{
+ uint8_t section_type;
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ int ret = -1;
+
+ while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
+ if (section_type != QEMU_VM_SECTION_PART &&
+ section_type != QEMU_VM_SECTION_END) {
+ error_report("load ram state, not get "
+ "QEMU_VM_SECTION_FULL or QEMU_VM_SECTION_END");
+ return -EINVAL;
+ }
+ ret = qemu_loadvm_section_part_end(f, mis);
+ if (ret < 0) {
+ goto out;
+ }
+ }
+ ret = qemu_file_get_error(f);
+ if (ret == 0) {
+ return 0;
+ }
+out:
+ return ret;
+}
+
+int qemu_load_device_state(QEMUFile *f)
+{
+ uint8_t section_type;
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ int ret = -1;
+
+ while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
+ if (section_type != QEMU_VM_SECTION_FULL) {
+ error_report("load device state error: "
+ "Not get QEMU_VM_SECTION_FULL");
+ return -EINVAL;
+ }
+ ret = qemu_loadvm_section_start_full(f, mis);
+ if (ret < 0) {
+ goto out;
+ }
+ }
+
+ ret = qemu_file_get_error(f);
+
+ cpu_synchronize_all_post_init();
+ if (ret == 0) {
+ return 0;
+ }
+out:
+ return ret;
+}
static BlockDriverState *find_vmstate_bs(void)
{
BlockDriverState *bs = NULL;
--
1.8.3.1
- [Qemu-devel] [PATCH COLO-Frame v10 25/38] COLO failover: Don't do failover during loading VM's state, (continued)
- [Qemu-devel] [PATCH COLO-Frame v10 25/38] COLO failover: Don't do failover during loading VM's state, zhanghailiang, 2015/11/03
- [Qemu-devel] [PATCH COLO-Frame v10 22/38] COLO: implement default failover treatment, zhanghailiang, 2015/11/03
- [Qemu-devel] [PATCH COLO-Frame v10 02/38] migration: Introduce capability 'x-colo' to migration, zhanghailiang, 2015/11/03
- [Qemu-devel] [PATCH COLO-Frame v10 23/38] qmp event: Add event notification for COLO error, zhanghailiang, 2015/11/03
- [Qemu-devel] [PATCH COLO-Frame v10 03/38] COLO: migrate colo related info to secondary node, zhanghailiang, 2015/11/03
- [Qemu-devel] [PATCH COLO-Frame v10 30/38] COLO: Separate the process of saving/loading ram and device state,
zhanghailiang <=
- [Qemu-devel] [PATCH COLO-Frame v10 27/38] COLO: Process shutdown command for VM in COLO state, zhanghailiang, 2015/11/03
- [Qemu-devel] [PATCH COLO-Frame v10 38/38] COLO: Add block replication into colo process, zhanghailiang, 2015/11/03
- [Qemu-devel] [PATCH COLO-Frame v10 34/38] filter-buffer: Accept zero interval, zhanghailiang, 2015/11/03
- [Qemu-devel] [PATCH COLO-Frame v10 05/38] migration: Integrate COLO checkpoint process into migration, zhanghailiang, 2015/11/03
- [Qemu-devel] [PATCH COLO-Frame v10 36/38] netfilter: Introduce an API to delete all the automatically added netfilters, zhanghailiang, 2015/11/03