qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v7 10/12] multifd: Support for zero pages transmission


From: chuang xu
Subject: Re: [PATCH v7 10/12] multifd: Support for zero pages transmission
Date: Tue, 25 Oct 2022 17:10:46 +0800
User-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:91.0) Gecko/20100101 Thunderbird/91.13.1


On 2022/8/2 下午2:39, Juan Quintela wrote:
This patch adds counters and similar.  Logic will be added on the
following patch.

Signed-off-by: Juan Quintela <quintela@redhat.com>

---

Added counters for duplicated/non duplicated pages.
Removed reviewed by from David.
Add total_zero_pages
---
  migration/multifd.h    | 17 ++++++++++++++++-
  migration/multifd.c    | 36 +++++++++++++++++++++++++++++-------
  migration/ram.c        |  2 --
  migration/trace-events |  8 ++++----
  4 files changed, 49 insertions(+), 14 deletions(-)

diff --git a/migration/multifd.h b/migration/multifd.h
index cd389d18d2..a1b852200d 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -47,7 +47,10 @@ typedef struct {
      /* size of the next packet that contains pages */
      uint32_t next_packet_size;
      uint64_t packet_num;
-    uint64_t unused[4];    /* Reserved for future use */
+    /* zero pages */
+    uint32_t zero_pages;
+    uint32_t unused32[1];    /* Reserved for future use */
+    uint64_t unused64[3];    /* Reserved for future use */
      char ramblock[256];
      uint64_t offset[];
  } __attribute__((packed)) MultiFDPacket_t;
@@ -127,6 +130,8 @@ typedef struct {
      uint64_t num_packets;
      /* non zero pages sent through this channel */
      uint64_t total_normal_pages;
+    /* zero pages sent through this channel */
+    uint64_t total_zero_pages;
      /* buffers to send */
      struct iovec *iov;
      /* number of iovs used */
@@ -135,6 +140,10 @@ typedef struct {
      ram_addr_t *normal;
      /* num of non zero pages */
      uint32_t normal_num;
+    /* Pages that are  zero */
+    ram_addr_t *zero;
+    /* num of zero pages */
+    uint32_t zero_num;
      /* used for compression methods */
      void *data;
  }  MultiFDSendParams;
@@ -184,12 +193,18 @@ typedef struct {
      uint8_t *host;
      /* non zero pages recv through this channel */
      uint64_t total_normal_pages;
+    /* zero pages recv through this channel */
+    uint64_t total_zero_pages;
      /* buffers to recv */
      struct iovec *iov;
      /* Pages that are not zero */
      ram_addr_t *normal;
      /* num of non zero pages */
      uint32_t normal_num;
+    /* Pages that are  zero */
+    ram_addr_t *zero;
+    /* num of zero pages */
+    uint32_t zero_num;
      /* used for de-compression methods */
      void *data;
  } MultiFDRecvParams;
diff --git a/migration/multifd.c b/migration/multifd.c
index 68fc9f8e88..4473d9f834 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -263,6 +263,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
      packet->normal_pages = cpu_to_be32(p->normal_num);
      packet->next_packet_size = cpu_to_be32(p->next_packet_size);
      packet->packet_num = cpu_to_be64(p->packet_num);
+    packet->zero_pages = cpu_to_be32(p->zero_num);
if (p->pages->block) {
          strncpy(packet->ramblock, p->pages->block->idstr, 256);
@@ -323,7 +324,15 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams 
*p, Error **errp)
      p->next_packet_size = be32_to_cpu(packet->next_packet_size);
      p->packet_num = be64_to_cpu(packet->packet_num);
- if (p->normal_num == 0) {
+    p->zero_num = be32_to_cpu(packet->zero_pages);
+    if (p->zero_num > packet->pages_alloc - p->normal_num) {
+        error_setg(errp, "multifd: received packet "
+                   "with %u zero pages and expected maximum pages are %u",
+                   p->zero_num, packet->pages_alloc - p->normal_num) ;
+        return -1;
+    }
+
+    if (p->normal_num == 0 && p->zero_num == 0) {
          return 0;
      }
@@ -432,6 +441,8 @@ static int multifd_send_pages(QEMUFile *f)
      ram_counters.multifd_bytes += p->sent_bytes;
      qemu_file_acct_rate_limit(f, p->sent_bytes);
      p->sent_bytes = 0;
+    ram_counters.normal += p->normal_num;
+    ram_counters.duplicate += p->zero_num;
      qemu_mutex_unlock(&p->mutex);
      qemu_sem_post(&p->sem);
@@ -545,6 +556,8 @@ void multifd_save_cleanup(void)
          p->iov = NULL;
          g_free(p->normal);
          p->normal = NULL;
+        g_free(p->zero);
+        p->zero = NULL;
          multifd_send_state->ops->send_cleanup(p, &local_err);
          if (local_err) {
              migrate_set_error(migrate_get_current(), local_err);
@@ -666,6 +679,7 @@ static void *multifd_send_thread(void *opaque)
              qemu_mutex_unlock(&p->mutex);
p->normal_num = 0;
+            p->zero_num = 0;
if (use_zero_copy_send) {
                  p->iovs_num = 0;
@@ -687,8 +701,8 @@ static void *multifd_send_thread(void *opaque)
              }
              multifd_send_fill_packet(p);
- trace_multifd_send(p->id, packet_num, p->normal_num, p->flags,
-                               p->next_packet_size);
+            trace_multifd_send(p->id, packet_num, p->normal_num, p->zero_num,
+                               p->flags, p->next_packet_size);
if (use_zero_copy_send) {
                  /* Send header first, without zerocopy */
@@ -712,6 +726,7 @@ static void *multifd_send_thread(void *opaque)
              qemu_mutex_lock(&p->mutex);
              p->num_packets++;
              p->total_normal_pages += p->normal_num;
+            p->total_zero_pages += p->zero_num;
              p->pages->num = 0;
              p->pages->block = NULL;
              p->sent_bytes += p->packet_len;;
@@ -753,7 +768,8 @@ out:
      qemu_mutex_unlock(&p->mutex);
rcu_unregister_thread();
-    trace_multifd_send_thread_end(p->id, p->num_packets, 
p->total_normal_pages);
+    trace_multifd_send_thread_end(p->id, p->num_packets, p->total_normal_pages,
+                                  p->total_zero_pages);
return NULL;
  }
@@ -938,6 +954,7 @@ int multifd_save_setup(Error **errp)
          p->normal = g_new0(ram_addr_t, page_count);
          p->page_size = qemu_target_page_size();
          p->page_count = page_count;
+        p->zero = g_new0(ram_addr_t, page_count);
if (migrate_use_zero_copy_send()) {
              p->write_flags = QIO_CHANNEL_WRITE_FLAG_ZERO_COPY;
@@ -1046,6 +1063,8 @@ int multifd_load_cleanup(Error **errp)
          p->iov = NULL;
          g_free(p->normal);
          p->normal = NULL;
+        g_free(p->zero);
+        p->zero = NULL;
          multifd_recv_state->ops->recv_cleanup(p);
      }
      qemu_sem_destroy(&multifd_recv_state->sem_sync);
@@ -1116,13 +1135,14 @@ static void *multifd_recv_thread(void *opaque)
              break;
          }
- trace_multifd_recv(p->id, p->packet_num, p->normal_num, p->flags,
-                           p->next_packet_size);
+        trace_multifd_recv(p->id, p->packet_num, p->normal_num, p->zero_num,
+                           p->flags, p->next_packet_size);
          sync_needed = p->flags & MULTIFD_FLAG_SYNC;
          /* recv methods don't know how to handle the SYNC flag */
          p->flags &= ~MULTIFD_FLAG_SYNC;
          p->num_packets++;
          p->total_normal_pages += p->normal_num;
+        p->total_normal_pages += p->zero_num;

Hi, Juan:

If I understand correctly, it should be "p->total_zero_pages += p->zero_num;".

By the way, this patch seems to greatly improve the performance of zero-page checking, but there seems to have been no new update in the past two months. When will it be merged into master?




reply via email to

[Prev in Thread] Current Thread [Next in Thread]