[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v5 6/8] multifd: Support for zero pages transmission
From: |
Juan Quintela |
Subject: |
[PATCH v5 6/8] multifd: Support for zero pages transmission |
Date: |
Thu, 10 Mar 2022 16:34:52 +0100 |
This patch adds counters and similar. Logic will be added in the
following patch.
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
Added counters for duplicated/non duplicated pages.
Removed reviewed by from David.
Add total_zero_pages
---
migration/multifd.h | 17 ++++++++++++++++-
migration/multifd.c | 36 +++++++++++++++++++++++++++++-------
migration/ram.c | 2 --
migration/trace-events | 8 ++++----
4 files changed, 49 insertions(+), 14 deletions(-)
diff --git a/migration/multifd.h b/migration/multifd.h
index 3afba8a198..06c52081ab 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -49,7 +49,10 @@ typedef struct {
/* size of the next packet that contains pages */
uint32_t next_packet_size;
uint64_t packet_num;
- uint64_t unused[4]; /* Reserved for future use */
+ /* zero pages */
+ uint32_t zero_pages;
+ uint32_t unused32[1]; /* Reserved for future use */
+ uint64_t unused64[3]; /* Reserved for future use */
char ramblock[256];
uint64_t offset[];
} __attribute__((packed)) MultiFDPacket_t;
@@ -107,6 +110,8 @@ typedef struct {
uint64_t num_packets;
/* non zero pages sent through this channel */
uint64_t total_normal_pages;
+ /* zero pages sent through this channel */
+ uint64_t total_zero_pages;
/* syncs main thread and channels */
QemuSemaphore sem_sync;
/* buffers to send */
@@ -117,6 +122,10 @@ typedef struct {
ram_addr_t *normal;
/* num of non zero pages */
uint32_t normal_num;
+ /* Pages that are zero */
+ ram_addr_t *zero;
+ /* num of zero pages */
+ uint32_t zero_num;
/* used for compression methods */
void *data;
/* How many bytes have we sent on the last packet */
@@ -156,6 +165,8 @@ typedef struct {
uint64_t num_packets;
/* non zero pages recv through this channel */
uint64_t total_normal_pages;
+ /* zero pages recv through this channel */
+ uint64_t total_zero_pages;
/* syncs main thread and channels */
QemuSemaphore sem_sync;
/* buffers to recv */
@@ -164,6 +175,10 @@ typedef struct {
ram_addr_t *normal;
/* num of non zero pages */
uint32_t normal_num;
+ /* Pages that are zero */
+ ram_addr_t *zero;
+ /* num of zero pages */
+ uint32_t zero_num;
/* used for de-compression methods */
void *data;
} MultiFDRecvParams;
diff --git a/migration/multifd.c b/migration/multifd.c
index ab87879471..41769ff99f 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -265,6 +265,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
packet->normal_pages = cpu_to_be32(p->normal_num);
packet->next_packet_size = cpu_to_be32(p->next_packet_size);
packet->packet_num = cpu_to_be64(p->packet_num);
+ packet->zero_pages = cpu_to_be32(p->zero_num);
if (p->pages->block) {
strncpy(packet->ramblock, p->pages->block->idstr, 256);
@@ -327,7 +328,15 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams
*p, Error **errp)
p->next_packet_size = be32_to_cpu(packet->next_packet_size);
p->packet_num = be64_to_cpu(packet->packet_num);
- if (p->normal_num == 0) {
+ p->zero_num = be32_to_cpu(packet->zero_pages);
+ if (p->zero_num > packet->pages_alloc - p->normal_num) {
+ error_setg(errp, "multifd: received packet "
+ "with %u zero pages and expected maximum pages are %u",
+ p->zero_num, packet->pages_alloc - p->normal_num);
+ return -1;
+ }
+
+ if (p->normal_num == 0 && p->zero_num == 0) {
return 0;
}
@@ -436,6 +445,8 @@ static int multifd_send_pages(QEMUFile *f)
ram_counters.multifd_bytes += p->sent_bytes;
qemu_file_update_transfer(f, p->sent_bytes);
p->sent_bytes = 0;
+ ram_counters.normal += p->normal_num;
+ ram_counters.duplicate += p->zero_num;
qemu_mutex_unlock(&p->mutex);
qemu_sem_post(&p->sem);
@@ -551,6 +562,8 @@ void multifd_save_cleanup(void)
p->iov = NULL;
g_free(p->normal);
p->normal = NULL;
+ g_free(p->zero);
+ p->zero = NULL;
multifd_send_state->ops->send_cleanup(p, &local_err);
if (local_err) {
migrate_set_error(migrate_get_current(), local_err);
@@ -636,6 +649,7 @@ static void *multifd_send_thread(void *opaque)
uint32_t flags = p->flags;
p->iovs_num = 1;
p->normal_num = 0;
+ p->zero_num = 0;
for (int i = 0; i < p->pages->num; i++) {
p->normal[p->normal_num] = p->pages->offset[i];
@@ -653,12 +667,13 @@ static void *multifd_send_thread(void *opaque)
p->flags = 0;
p->num_packets++;
p->total_normal_pages += p->normal_num;
+ p->total_zero_pages += p->zero_num;
p->pages->num = 0;
p->pages->block = NULL;
qemu_mutex_unlock(&p->mutex);
- trace_multifd_send(p->id, packet_num, p->normal_num, flags,
- p->next_packet_size);
+ trace_multifd_send(p->id, packet_num, p->normal_num, p->zero_num,
+ flags, p->next_packet_size);
p->iov[0].iov_len = p->packet_len;
p->iov[0].iov_base = p->packet;
@@ -709,7 +724,8 @@ out:
qemu_mutex_unlock(&p->mutex);
rcu_unregister_thread();
- trace_multifd_send_thread_end(p->id, p->num_packets,
p->total_normal_pages);
+ trace_multifd_send_thread_end(p->id, p->num_packets, p->total_normal_pages,
+ p->total_zero_pages);
return NULL;
}
@@ -910,6 +926,7 @@ int multifd_save_setup(Error **errp)
/* We need one extra place for the packet header */
p->iov = g_new0(struct iovec, page_count + 1);
p->normal = g_new0(ram_addr_t, page_count);
+ p->zero = g_new0(ram_addr_t, page_count);
socket_send_channel_create(multifd_new_send_channel_async, p);
}
@@ -1011,6 +1028,8 @@ int multifd_load_cleanup(Error **errp)
p->iov = NULL;
g_free(p->normal);
p->normal = NULL;
+ g_free(p->zero);
+ p->zero = NULL;
multifd_recv_state->ops->recv_cleanup(p);
}
qemu_sem_destroy(&multifd_recv_state->sem_sync);
@@ -1084,10 +1103,11 @@ static void *multifd_recv_thread(void *opaque)
flags = p->flags;
/* recv methods don't know how to handle the SYNC flag */
p->flags &= ~MULTIFD_FLAG_SYNC;
- trace_multifd_recv(p->id, p->packet_num, p->normal_num, flags,
- p->next_packet_size);
+ trace_multifd_recv(p->id, p->packet_num, p->normal_num, p->zero_num,
+ flags, p->next_packet_size);
p->num_packets++;
p->total_normal_pages += p->normal_num;
+ p->total_zero_pages += p->zero_num;
qemu_mutex_unlock(&p->mutex);
if (p->normal_num) {
@@ -1112,7 +1132,8 @@ static void *multifd_recv_thread(void *opaque)
qemu_mutex_unlock(&p->mutex);
rcu_unregister_thread();
- trace_multifd_recv_thread_end(p->id, p->num_packets,
p->total_normal_pages);
+ trace_multifd_recv_thread_end(p->id, p->num_packets, p->total_normal_pages,
+ p->total_zero_pages);
return NULL;
}
@@ -1150,6 +1171,7 @@ int multifd_load_setup(Error **errp)
p->name = g_strdup_printf("multifdrecv_%d", i);
p->iov = g_new0(struct iovec, page_count);
p->normal = g_new0(ram_addr_t, page_count);
+ p->zero = g_new0(ram_addr_t, page_count);
}
for (i = 0; i < thread_count; i++) {
diff --git a/migration/ram.c b/migration/ram.c
index 1a642f1e70..141817d6a7 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1354,8 +1354,6 @@ static int ram_save_multifd_page(RAMState *rs, RAMBlock
*block,
if (multifd_queue_page(rs->f, block, offset) < 0) {
return -1;
}
- ram_counters.normal++;
-
return 1;
}
diff --git a/migration/trace-events b/migration/trace-events
index 1aec580e92..d70e89dbb9 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -114,21 +114,21 @@ unqueue_page(char *block, uint64_t offset, bool dirty)
"ramblock '%s' offset 0x%
# multifd.c
multifd_new_send_channel_async(uint8_t id) "channel %u"
-multifd_recv(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags,
uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " pages %u flags
0x%x next packet size %u"
+multifd_recv(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t zero,
uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 "
normal pages %u zero pages %u flags 0x%x next packet size %u"
multifd_recv_new_channel(uint8_t id) "channel %u"
multifd_recv_sync_main(long packet_num) "packet num %ld"
multifd_recv_sync_main_signal(uint8_t id) "channel %u"
multifd_recv_sync_main_wait(uint8_t id) "channel %u"
multifd_recv_terminate_threads(bool error) "error %d"
-multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel
%u packets %" PRIu64 " pages %" PRIu64
+multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages,
uint64_t zero_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64 "
zero pages %" PRIu64
multifd_recv_thread_start(uint8_t id) "%u"
-multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t flags,
uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal pages %u
flags 0x%x next packet size %u"
+multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal_pages, uint32_t
zero_pages, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num
%" PRIu64 " normal pages %u zero pages %u flags 0x%x next packet size %u"
multifd_send_error(uint8_t id) "channel %u"
multifd_send_sync_main(long packet_num) "packet num %ld"
multifd_send_sync_main_signal(uint8_t id) "channel %u"
multifd_send_sync_main_wait(uint8_t id) "channel %u"
multifd_send_terminate_threads(bool error) "error %d"
-multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages)
"channel %u packets %" PRIu64 " normal pages %" PRIu64
+multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages,
uint64_t zero_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64 "
zero pages %" PRIu64
multifd_send_thread_start(uint8_t id) "%u"
multifd_tls_outgoing_handshake_start(void *ioc, void *tioc, const char
*hostname) "ioc=%p tioc=%p hostname=%s"
multifd_tls_outgoing_handshake_error(void *ioc, const char *err) "ioc=%p
err=%s"
--
2.34.1
- [PATCH v5 0/8] Migration: Transmit and detect zero pages in the multifd threads, Juan Quintela, 2022/03/10
- [PATCH v5 1/8] migration: Export ram_transferred_ram(), Juan Quintela, 2022/03/10
- [PATCH v5 2/8] multifd: Count the number of sent bytes correctly, Juan Quintela, 2022/03/10
- [PATCH v5 4/8] multifd: Add property to enable/disable zero_page, Juan Quintela, 2022/03/10
- [PATCH v5 5/8] migration: Export ram_release_page(), Juan Quintela, 2022/03/10
- [PATCH v5 3/8] migration: Make ram_save_target_page() a pointer, Juan Quintela, 2022/03/10
- [PATCH v5 6/8] multifd: Support for zero pages transmission,
Juan Quintela <=
- [PATCH v5 8/8] migration: Use multifd before we check for the zero page, Juan Quintela, 2022/03/10
- [PATCH v5 7/8] multifd: Zero pages transmission, Juan Quintela, 2022/03/10