From: zhanghailiang
Subject: [Qemu-devel] [PATCH COLO-Frame v18 13/34] COLO: Flush PVM's cached RAM into SVM's memory
Date: Wed, 3 Aug 2016 20:25:51 +0800

While the VM is running, the PVM dirties some pages; at the next checkpoint
we transfer those dirty pages to the SVM and store them in the SVM's RAM
cache. After each checkpoint, the content of the SVM's RAM cache is
therefore identical to the PVM's memory.

Instead of flushing the entire RAM cache into the SVM's memory, we do this
more efficiently: we flush only the pages that the PVM has dirtied since
the last checkpoint. This is sufficient to keep the SVM's memory identical
to the PVM's.

Besides, we must flush the RAM cache before loading the device state.
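
To make the ordering concrete, here is a rough sketch of one checkpoint on
the incoming (SVM) side. Only colo_flush_ram_cache() comes from this patch;
receive_pvm_dirty_pages() and load_pvm_device_state() are hypothetical
placeholders for the surrounding COLO-frame logic, not real QEMU APIs:

    /* Illustrative sketch only; the two helpers are placeholders. */
    static void colo_checkpoint_sketch(QEMUFile *from_pvm)
    {
        /* 1. While COLO is active, ram_load() stores the PVM's dirty
         *    pages into the colo_cache instead of guest RAM.         */
        receive_pvm_dirty_pages(from_pvm);

        /* 2. Copy the cached dirty pages into the SVM's memory, so
         *    the SVM's RAM now matches the PVM's at this checkpoint. */
        colo_flush_ram_cache();

        /* 3. Load the device state only after RAM is consistent:
         *    restored devices may reference guest memory.            */
        load_pvm_device_state(from_pvm);
    }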

Signed-off-by: zhanghailiang <address@hidden>
Signed-off-by: Li Zhijian <address@hidden>
Signed-off-by: Gonglei <address@hidden>
Reviewed-by: Dr. David Alan Gilbert <address@hidden>
---
v12:
- Add a trace point at the end of colo_flush_ram_cache() (Dave's suggestion)
- Add Reviewed-by tag
v11:
- Move the place of 'need_flush' (Dave's suggestion)
- Remove unused 'DPRINTF("Flush ram_cache\n")'
v10:
- Trace the number of dirty pages received.
---
 include/migration/migration.h |  1 +
 migration/colo.c              |  2 --
 migration/ram.c               | 38 ++++++++++++++++++++++++++++++++++++++
 migration/trace-events        |  2 ++
 4 files changed, 41 insertions(+), 2 deletions(-)

diff --git a/include/migration/migration.h b/include/migration/migration.h
index 55679b5..8ae5248 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -366,4 +366,5 @@ PostcopyState postcopy_state_set(PostcopyState new_state);
 /* ram cache */
 int colo_init_ram_cache(void);
 void colo_release_ram_cache(void);
+void colo_flush_ram_cache(void);
 #endif
diff --git a/migration/colo.c b/migration/colo.c
index 8fa2104..58bd5e1 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -407,8 +407,6 @@ void *colo_process_incoming_thread(void *opaque)
         }
         qemu_mutex_unlock_iothread();
 
-        /* TODO: flush vm state */
-
         colo_send_message(mis->to_src_file, COLO_MESSAGE_VMSTATE_LOADED,
                      &local_err);
         if (local_err) {
diff --git a/migration/ram.c b/migration/ram.c
index 99449c3..198dc53 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2499,6 +2499,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
      * be atomic
      */
     bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;
+    bool need_flush = false;
 
     seq_iter++;
 
@@ -2533,6 +2534,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
             /* After going into COLO, we should load the Page into colo_cache */
             if (ram_cache_enable) {
                 host = colo_cache_from_block_offset(block, addr);
+                need_flush = true;
             } else {
                 host = host_from_ram_block_offset(block, addr);
             }
@@ -2626,6 +2628,10 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
 
     wait_for_decompress_done();
     rcu_read_unlock();
+
+    if (!ret && ram_cache_enable && need_flush) {
+        colo_flush_ram_cache();
+    }
     DPRINTF("Completed load of VM with exit code %d seq iteration "
             "%" PRIu64 "\n", ret, seq_iter);
     return ret;
@@ -2698,6 +2704,38 @@ void colo_release_ram_cache(void)
     rcu_read_unlock();
 }
 
+/*
+ * Flush content of RAM cache into SVM's memory.
+ * Only flush the pages that were dirtied by the PVM, the SVM, or both.
+ */
+void colo_flush_ram_cache(void)
+{
+    RAMBlock *block = NULL;
+    void *dst_host;
+    void *src_host;
+    ram_addr_t offset = 0;
+
+    trace_colo_flush_ram_cache_begin(migration_dirty_pages);
+    rcu_read_lock();
+    block = QLIST_FIRST_RCU(&ram_list.blocks);
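+    /*
+     * Walk the dirty bitmap: find the next page dirtied since the last
+     * checkpoint, clear its bit, and copy the page from the colo_cache
+     * into guest RAM; when the search runs past the end of a block,
+     * reset 'offset' and move on to the next RAMBlock.
+     */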
+    while (block) {
+        ram_addr_t ram_addr_abs;
+        offset = migration_bitmap_find_dirty(block, offset, &ram_addr_abs);
+        if (offset >= block->used_length) {
+            offset = 0;
+            block = QLIST_NEXT_RCU(block, next);
+        } else {
+            /* Clear the bit only for a page we actually copy. */
+            migration_bitmap_clear_dirty(ram_addr_abs);
+            dst_host = block->host + offset;
+            src_host = block->colo_cache + offset;
+            memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
+        }
+    }
+    rcu_read_unlock();
+    trace_colo_flush_ram_cache_end();
+    assert(migration_dirty_pages == 0);
+}
+
 static SaveVMHandlers savevm_ram_handlers = {
     .save_live_setup = ram_save_setup,
     .save_live_iterate = ram_save_iterate,
diff --git a/migration/trace-events b/migration/trace-events
index e98eaef..82dc7a0 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -65,6 +65,8 @@ migration_throttle(void) ""
 ram_load_postcopy_loop(uint64_t addr, int flags) "@%" PRIx64 " %x"
 ram_postcopy_send_discard_bitmap(void) ""
 ram_save_queue_pages(const char *rbname, size_t start, size_t len) "%s: start: %zx len: %zx"
+colo_flush_ram_cache_begin(uint64_t dirty_pages) "dirty_pages %" PRIu64
+colo_flush_ram_cache_end(void) ""
 
 # migration/migration.c
 await_return_path_close_on_source_close(void) ""
-- 
1.8.3.1