From: Alexey Perevalov
Subject: Re: [Qemu-devel] [PATCH V6 08/10] migration: calculate vCPU blocktime on dst side
Date: Wed, 07 Jun 2017 10:34:19 +0300
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Thunderbird/52.1.1

On 06/01/2017 01:57 PM, Dr. David Alan Gilbert wrote:
* Alexey Perevalov (address@hidden) wrote:
This patch provides blocktime calculation per vCPU,
both as a summary and as an overlapped value for all vCPUs.

This approach was suggested by Peter Xu as an improvement on the
previous approach, where QEMU kept a tree with the faulted page address
and a CPU bitmask in it. Now QEMU keeps an array with the faulted page
address as the value and the vCPU as the index. That makes it easy to find
the proper vCPU at UFFD_COPY time. It also keeps a per-vCPU blocktime
list (which can be traced with page_fault_addr).

Blocktime will not be calculated if the postcopy_blocktime field of
MigrationIncomingState wasn't initialized.

Signed-off-by: Alexey Perevalov <address@hidden>
<snip>

+    if (dc->vcpu_addr[cpu] == 0) {
+        atomic_inc(&dc->smp_cpus_down);
+    }
+
+    atomic_xchg__nocheck(&dc->vcpu_addr[cpu], addr);
I was wondering if this could be done with atomic_cmpxchg with old=0,
but the behaviour would be different in the case where vcpu_addr[cpu]
wasn't zero or was already 'addr'; so I think allowing it to cope with
that case seems better.

atomic_xchg__nocheck isn't atomic_cmpxchg; it is based on __atomic_exchange_n
(from the reference: "It writes val into *ptr, and returns the previous
contents of *ptr"), so I leave it as is.
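To illustrate the difference being discussed, a minimal standalone
sketch in C11 atomics (the helper names here are hypothetical; the real
QEMU macros wrap the GCC __atomic builtins):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Unconditional exchange, as atomic_xchg__nocheck does: writes addr
     * into *slot and returns the previous contents, whatever they were. */
    static uint64_t record_fault_xchg(_Atomic uint64_t *slot, uint64_t addr)
    {
        return atomic_exchange(slot, addr);
    }

    /* Compare-and-swap with old == 0: writes addr only if *slot was 0.
     * If the vCPU already had some (different) address recorded, the
     * store is skipped -- the behavioural difference noted above. */
    static uint64_t record_fault_cmpxchg(_Atomic uint64_t *slot, uint64_t addr)
    {
        uint64_t expected = 0;
        atomic_compare_exchange_strong(slot, &expected, addr);
        return expected;  /* previous contents of *slot */
    }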


Dave

+    atomic_xchg__nocheck(&dc->last_begin, now_ms);
+    atomic_xchg__nocheck(&dc->page_fault_vcpu_time[cpu], now_ms);
+
+    trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
+            cpu);
+}
+
+static void mark_postcopy_blocktime_end(uint64_t addr)
+{
+    MigrationIncomingState *mis = migration_incoming_get_current();
+    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
+    int i, affected_cpu = 0;
+    int64_t now_ms;
+    bool vcpu_total_blocktime = false;
+    unsigned long int nr_bit;
+
+    if (!dc) {
+        return;
+    }
+    /* mark that page as copied */
+    nr_bit = get_copied_bit_offset(addr);
+    set_bit_atomic(nr_bit, mis->copied_pages);
+
+    now_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+
+    /* Look up the cpu in order to clear it. This algorithm looks
+     * straightforward, but it's not optimal: a more optimal algorithm
+     * would keep a tree or hash where the key is the address and the
+     * value is a list of vCPUs. */
+    for (i = 0; i < smp_cpus; i++) {
+        uint64_t vcpu_blocktime = 0;
+        if (atomic_fetch_add(&dc->vcpu_addr[i], 0) != addr) {
+            continue;
+        }
+        atomic_xchg__nocheck(&dc->vcpu_addr[i], 0);
+        vcpu_blocktime = now_ms -
+            atomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
+        affected_cpu += 1;
+        /* We need to know whether mark_postcopy_blocktime_end was called
+         * due to a faulted page; the other possible case is a prefetched
+         * page, and in that case we shouldn't be here. */
+        if (!vcpu_total_blocktime &&
+            atomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
+            vcpu_total_blocktime = true;
+        }
+        /* continue cycle, due to one page could affect several vCPUs */
+        dc->vcpu_blocktime[i] += vcpu_blocktime;
+    }
+
+    atomic_sub(&dc->smp_cpus_down, affected_cpu);
+    if (vcpu_total_blocktime) {
+        dc->total_blocktime += now_ms - atomic_fetch_add(&dc->last_begin, 0);
+    }
+    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime);
+}
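To illustrate the accounting above (invented numbers): with two vCPUs,
if vCPU0 faults at t=100ms, vCPU1 faults at t=120ms, and the copied page
unblocks both at t=150ms, then vcpu_blocktime grows by 50ms and 30ms
respectively, while total_blocktime grows only by 150 - last_begin =
30ms, the interval during which all vCPUs were simultaneously blocked.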
+
  /*
   * Handle faults detected by the USERFAULT markings
   */
@@ -654,8 +750,11 @@ static void *postcopy_ram_fault_thread(void *opaque)
          rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
          trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                  qemu_ram_get_idstr(rb),
-                                                rb_offset);
+                                                rb_offset,
+                                                msg.arg.pagefault.feat.ptid);
+        mark_postcopy_blocktime_begin((uintptr_t)(msg.arg.pagefault.address),
+                msg.arg.pagefault.feat.ptid, rb);
          /*
           * Send the request to the source - we want to request one
           * of our host page sizes (which is >= TPS)
@@ -750,6 +849,7 @@ int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
         return -e;
     }
+    mark_postcopy_blocktime_end((uint64_t)(uintptr_t)host);
     trace_postcopy_place_page(host);
     return 0;
diff --git a/migration/trace-events b/migration/trace-events
index 5b8ccf3..7bdadbb 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -112,6 +112,8 @@ process_incoming_migration_co_end(int ret, int ps) "ret=%d postcopy-state=%d"
 process_incoming_migration_co_postcopy_end_main(void) ""
 migration_set_incoming_channel(void *ioc, const char *ioctype) "ioc=%p ioctype=%s"
 migration_set_outgoing_channel(void *ioc, const char *ioctype, const char *hostname) "ioc=%p ioctype=%s hostname=%s"
+mark_postcopy_blocktime_begin(uint64_t addr, void *dd, int64_t time, int cpu) "addr 0x%" PRIx64 " dd %p time %" PRId64 " cpu %d"
+mark_postcopy_blocktime_end(uint64_t addr, void *dd, int64_t time) "addr 0x%" PRIx64 " dd %p time %" PRId64
# migration/rdma.c
  qemu_rdma_accept_incoming_migration(void) ""
@@ -188,7 +190,7 @@ postcopy_ram_enable_notify(void) ""
  postcopy_ram_fault_thread_entry(void) ""
  postcopy_ram_fault_thread_exit(void) ""
  postcopy_ram_fault_thread_quit(void) ""
-postcopy_ram_fault_thread_request(uint64_t hostaddr, const char *ramblock, size_t offset) "Request for HVA=%" PRIx64 " rb=%s offset=%zx"
+postcopy_ram_fault_thread_request(uint64_t hostaddr, const char *ramblock, size_t offset, uint32_t pid) "Request for HVA=%" PRIx64 " rb=%s offset=%zx %u"
  postcopy_ram_incoming_cleanup_closeuf(void) ""
  postcopy_ram_incoming_cleanup_entry(void) ""
  postcopy_ram_incoming_cleanup_exit(void) ""
@@ -197,6 +199,7 @@ save_xbzrle_page_skipping(void) ""
  save_xbzrle_page_overflow(void) ""
  ram_save_iterate_big_wait(uint64_t milliconds, int iterations) "big wait: %" PRIu64 " milliseconds, %d iterations"
  ram_load_complete(int ret, uint64_t seq_iter) "exit_code %d seq iteration %" PRIu64
+get_mem_fault_cpu_index(uint32_t pid) "pid %u is not vCPU"
# migration/exec.c
  migration_exec_outgoing(const char *cmd) "cmd=%s"
--
1.8.3.1

--
Dr. David Alan Gilbert / address@hidden / Manchester, UK




--
Best regards,
Alexey Perevalov


