[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH COLO-Frame v12 31/38] COLO: Separate the process of
From: |
zhanghailiang |
Subject: |
[Qemu-devel] [PATCH COLO-Frame v12 31/38] COLO: Separate the process of saving/loading ram and device state |
Date: |
Tue, 15 Dec 2015 16:22:52 +0800 |
We separate the process of saving/loading ram and device state when doing a
checkpoint, and add new helpers for saving/loading ram/device state. With this
change, we can directly transfer ram from master to slave without using a
QEMUSizedBuffer as an intermediary, which also reduces the amount of extra
memory used during a checkpoint.
Besides, we move the colo_flush_ram_cache to the proper position after the
above change.
Signed-off-by: zhanghailiang <address@hidden>
Signed-off-by: Li Zhijian <address@hidden>
---
v11:
- Remove load configuration section in qemu_loadvm_state_begin()
Signed-off-by: zhanghailiang <address@hidden>
---
include/sysemu/sysemu.h | 6 +++
migration/colo.c | 43 ++++++++++++----
migration/ram.c | 5 --
migration/savevm.c | 132 ++++++++++++++++++++++++++++++++++++++++++++++--
4 files changed, 168 insertions(+), 18 deletions(-)
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index 91eeda3..5deae53 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -133,7 +133,13 @@ void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f,
const char *name,
uint64_t *start_list,
uint64_t *length_list);
+int qemu_save_ram_precopy(QEMUFile *f);
+int qemu_save_device_state(QEMUFile *f);
+
int qemu_loadvm_state(QEMUFile *f);
+int qemu_loadvm_state_begin(QEMUFile *f);
+int qemu_load_ram_state(QEMUFile *f);
+int qemu_load_device_state(QEMUFile *f);
typedef enum DisplayType
{
diff --git a/migration/colo.c b/migration/colo.c
index 62a0444..d253d64 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -272,21 +272,32 @@ static int colo_do_checkpoint_transaction(MigrationState
*s,
goto out;
}
+ ret = colo_put_cmd(s->to_dst_file, COLO_COMMAND_VMSTATE_SEND);
+ if (ret < 0) {
+ goto out;
+ }
/* Disable block migration */
s->params.blk = 0;
s->params.shared = 0;
- qemu_savevm_state_header(trans);
- qemu_savevm_state_begin(trans, &s->params);
- qemu_mutex_lock_iothread();
- qemu_savevm_state_complete_precopy(trans, false);
- qemu_mutex_unlock_iothread();
-
- qemu_fflush(trans);
+ qemu_savevm_state_begin(s->to_dst_file, &s->params);
+ ret = qemu_file_get_error(s->to_dst_file);
+ if (ret < 0) {
+ error_report("save vm state begin error\n");
+ goto out;
+ }
- ret = colo_put_cmd(s->to_dst_file, COLO_COMMAND_VMSTATE_SEND);
+ qemu_mutex_lock_iothread();
+ /* Note: device state is saved into buffer */
+ ret = qemu_save_device_state(trans);
if (ret < 0) {
+ error_report("save device state error\n");
+ qemu_mutex_unlock_iothread();
goto out;
}
+ qemu_fflush(trans);
+ qemu_save_ram_precopy(s->to_dst_file);
+ qemu_mutex_unlock_iothread();
+
/* we send the total size of the vmstate first */
size = qsb_get_length(buffer);
ret = colo_put_cmd_value(s->to_dst_file, COLO_COMMAND_VMSTATE_SIZE, size);
@@ -545,6 +556,16 @@ void *colo_process_incoming_thread(void *opaque)
goto out;
}
+ ret = qemu_loadvm_state_begin(mis->from_src_file);
+ if (ret < 0) {
+ error_report("load vm state begin error, ret=%d", ret);
+ goto out;
+ }
+ ret = qemu_load_ram_state(mis->from_src_file);
+ if (ret < 0) {
+ error_report("load ram state error");
+ goto out;
+ }
/* read the VM state total size first */
ret = colo_get_cmd_value(mis->from_src_file,
COLO_COMMAND_VMSTATE_SIZE, &value);
@@ -577,8 +598,10 @@ void *colo_process_incoming_thread(void *opaque)
qemu_mutex_lock_iothread();
qemu_system_reset(VMRESET_SILENT);
vmstate_loading = true;
- if (qemu_loadvm_state(fb) < 0) {
- error_report("COLO: loadvm failed");
+ colo_flush_ram_cache();
+ ret = qemu_load_device_state(fb);
+ if (ret < 0) {
+ error_report("COLO: load device state failed\n");
vmstate_loading = false;
qemu_mutex_unlock_iothread();
goto out;
diff --git a/migration/ram.c b/migration/ram.c
index 8ff7f7c..45d9332 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2458,7 +2458,6 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
* be atomic
*/
bool postcopy_running = postcopy_state_get() >=
POSTCOPY_INCOMING_LISTENING;
- bool need_flush = false;
seq_iter++;
@@ -2493,7 +2492,6 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
/* After going into COLO, we should load the Page into colo_cache
*/
if (ram_cache_enable) {
host = colo_cache_from_block_offset(block, addr);
- need_flush = true;
} else {
host = host_from_ram_block_offset(block, addr);
}
@@ -2588,9 +2586,6 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
rcu_read_unlock();
- if (!ret && ram_cache_enable && need_flush) {
- colo_flush_ram_cache();
- }
DPRINTF("Completed load of VM with exit code %d seq iteration "
"%" PRIu64 "\n", ret, seq_iter);
return ret;
diff --git a/migration/savevm.c b/migration/savevm.c
index c7c26d8..94c0d10 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -50,6 +50,7 @@
#include "qemu/iov.h"
#include "block/snapshot.h"
#include "block/qapi.h"
+#include "migration/colo.h"
#ifndef ETH_P_RARP
@@ -923,6 +924,10 @@ void qemu_savevm_state_begin(QEMUFile *f,
break;
}
}
+ if (migration_in_colo_state()) {
+ qemu_put_byte(f, QEMU_VM_EOF);
+ qemu_fflush(f);
+ }
}
/*
@@ -1192,13 +1197,44 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
return ret;
}
-static int qemu_save_device_state(QEMUFile *f)
+/*
+ * Save all live-precopy (RAM) sections as SECTION_END records,
+ * followed by QEMU_VM_EOF. Returns 0 on success, negative errno on
+ * failure (the error is also latched into @f via qemu_file_set_error).
+ */
+int qemu_save_ram_precopy(QEMUFile *f)
{
SaveStateEntry *se;
+ int ret = 0;
- qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
- qemu_put_be32(f, QEMU_VM_FILE_VERSION);
+ QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+ if (!se->ops || !se->ops->save_live_complete_precopy) {
+ continue;
+ }
+ /* se->ops is known non-NULL here; only is_active needs checking */
+ if (se->ops->is_active && !se->ops->is_active(se->opaque)) {
+ continue;
+ }
+ trace_savevm_section_start(se->idstr, se->section_id);
+
+ save_section_header(f, se, QEMU_VM_SECTION_END);
+ ret = se->ops->save_live_complete_precopy(f, se->opaque);
+ trace_savevm_section_end(se->idstr, se->section_id, ret);
+ save_section_footer(f, se);
+ if (ret < 0) {
+ qemu_file_set_error(f, ret);
+ return ret;
+ }
+ }
+ qemu_put_byte(f, QEMU_VM_EOF);
+
+ return 0;
+}
+
+int qemu_save_device_state(QEMUFile *f)
+{
+ SaveStateEntry *se;
+
+ if (!migration_in_colo_state()) {
+ qemu_savevm_state_header(f);
+ }
cpu_synchronize_all_states();
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
@@ -1938,6 +1974,96 @@ int qemu_loadvm_state(QEMUFile *f)
return ret;
}
+/*
+ * Load the QEMU_VM_SECTION_START part of an incoming stream, up to and
+ * including the QEMU_VM_EOF terminator written by the COLO-aware
+ * qemu_savevm_state_begin(). Returns 0 on success, negative errno on error.
+ */
+int qemu_loadvm_state_begin(QEMUFile *f)
+{
+ uint8_t section_type;
+ int ret = -1;
+ MigrationIncomingState *mis = migration_incoming_get_current();
+
+ if (!mis) {
+ error_report("qemu_loadvm_state_begin: no incoming migration state");
+ return -EINVAL;
+ }
+ /* Drop any handler state left over from a previous checkpoint */
+ loadvm_free_handlers(mis);
+
+ if (qemu_savevm_state_blocked(NULL)) {
+ return -EINVAL;
+ }
+
+ while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
+ if (section_type != QEMU_VM_SECTION_START) {
+ error_report("load vm state begin: expected "
+ "QEMU_VM_SECTION_START, got %d", section_type);
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = qemu_loadvm_section_start_full(f, mis);
+ if (ret < 0) {
+ goto out;
+ }
+ }
+ ret = qemu_file_get_error(f);
+ if (ret == 0) {
+ return 0;
+ }
+out:
+ return ret;
+}
+
+/*
+ * Load the RAM part of an incoming stream: a sequence of
+ * QEMU_VM_SECTION_PART/QEMU_VM_SECTION_END sections terminated by
+ * QEMU_VM_EOF. Returns 0 on success, negative errno on error.
+ */
+int qemu_load_ram_state(QEMUFile *f)
+{
+ uint8_t section_type;
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ int ret = -1;
+
+ while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
+ if (section_type != QEMU_VM_SECTION_PART &&
+ section_type != QEMU_VM_SECTION_END) {
+ /* Fixed: message previously named SECTION_FULL, which is
+ * not what this loop accepts. */
+ error_report("load ram state: expected QEMU_VM_SECTION_PART "
+ "or QEMU_VM_SECTION_END, got %d", section_type);
+ return -EINVAL;
+ }
+ ret = qemu_loadvm_section_part_end(f, mis);
+ if (ret < 0) {
+ goto out;
+ }
+ }
+ ret = qemu_file_get_error(f);
+ if (ret == 0) {
+ return 0;
+ }
+out:
+ return ret;
+}
+
+/*
+ * Load the device part of an incoming stream: a sequence of
+ * QEMU_VM_SECTION_FULL sections terminated by QEMU_VM_EOF, then
+ * synchronize CPU state. Returns 0 on success, negative errno on error.
+ */
+int qemu_load_device_state(QEMUFile *f)
+{
+ uint8_t section_type;
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ int ret = -1;
+
+ while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
+ if (section_type != QEMU_VM_SECTION_FULL) {
+ error_report("load device state: expected "
+ "QEMU_VM_SECTION_FULL, got %d", section_type);
+ return -EINVAL;
+ }
+ ret = qemu_loadvm_section_start_full(f, mis);
+ if (ret < 0) {
+ goto out;
+ }
+ }
+
+ ret = qemu_file_get_error(f);
+
+ cpu_synchronize_all_post_init();
+ if (ret == 0) {
+ return 0;
+ }
+out:
+ return ret;
+}
+
void hmp_savevm(Monitor *mon, const QDict *qdict)
{
BlockDriverState *bs, *bs1;
--
1.8.3.1
- Re: [Qemu-devel] [Qemu-block] [PATCH COLO-Frame v12 25/38] qmp event: Add event notification for COLO error, (continued)
- [Qemu-devel] [PATCH COLO-Frame v12 33/38] net/filter-buffer: Add default filter-buffer for each netdev, zhanghailiang, 2015/12/15
- [Qemu-devel] [PATCH COLO-Frame v12 38/38] COLO: Add block replication into colo process, zhanghailiang, 2015/12/15
- [Qemu-devel] [PATCH COLO-Frame v12 37/38] colo: Use default buffer-filter to buffer and release packets, zhanghailiang, 2015/12/15
- [Qemu-devel] [PATCH COLO-Frame v12 35/38] filter-buffer: Introduce a helper function to enable/disable default filter, zhanghailiang, 2015/12/15
- [Qemu-devel] [PATCH COLO-Frame v12 36/38] filter-buffer: Introduce a helper function to release packets, zhanghailiang, 2015/12/15
- [Qemu-devel] [PATCH COLO-Frame v12 34/38] filter-buffer: Accept zero interval, zhanghailiang, 2015/12/15
- [Qemu-devel] [PATCH COLO-Frame v12 13/38] COLO: Save PVM state to secondary side when do checkpoint, zhanghailiang, 2015/12/15
- [Qemu-devel] [PATCH COLO-Frame v12 31/38] COLO: Separate the process of saving/loading ram and device state,
zhanghailiang <=
- [Qemu-devel] [PATCH COLO-Frame v12 29/38] COLO: Update the global runstate after going into colo state, zhanghailiang, 2015/12/15
- [Qemu-devel] [PATCH COLO-Frame v12 14/38] ram: Split host_from_stream_offset() into two helper functions, zhanghailiang, 2015/12/15
- [Qemu-devel] [PATCH COLO-Frame v12 32/38] COLO: Split qemu_savevm_state_begin out of checkpoint process, zhanghailiang, 2015/12/15
- Re: [Qemu-devel] [PATCH COLO-Frame v12 00/38] COarse-grain LOck-stepping(COLO) Virtual Machines for Non-stop Service (FT), Dr. David Alan Gilbert, 2015/12/15