From: Juan Quintela
Subject: [Qemu-devel] [PATCH v5 13/17] migration: Create thread infrastructure for multifd recv side
Date: Mon, 17 Jul 2017 15:42:34 +0200
We make the locking and the transfer of information explicit, even though
we are still receiving everything through the main thread.
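
The handoff works like a per-channel mailbox: each recv thread posts its
"ready" semaphore when idle, the main thread waits on that, copies the next
batch of pages into the thread's slot under the mutex, and posts "sem" to
wake it. Below is a minimal, self-contained sketch of that pattern using
plain POSIX primitives; the Worker struct and the names in it are
illustrative stand-ins, not part of the patch (QEMU's
qemu_sem_*/qemu_mutex_* wrappers behave like the sem_*/pthread_mutex_*
calls used here):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for one MultiFDRecvParams worker. */
    typedef struct {
        sem_t ready;            /* worker -> main: "I am idle"     */
        sem_t sem;              /* main -> worker: "work is ready" */
        pthread_mutex_t mutex;  /* protects the fields below       */
        bool quit;
        int num_pages;          /* stands in for p->pages.num      */
    } Worker;

    static void *recv_thread(void *opaque)
    {
        Worker *w = opaque;

        sem_post(&w->ready);            /* announce initial readiness */
        for (;;) {
            pthread_mutex_lock(&w->mutex);
            if (w->quit) {
                pthread_mutex_unlock(&w->mutex);
                break;
            }
            if (w->num_pages) {         /* consume the queued batch   */
                printf("worker: got %d pages\n", w->num_pages);
                w->num_pages = 0;
                pthread_mutex_unlock(&w->mutex);
                sem_post(&w->ready);    /* ready for the next batch   */
                continue;
            }
            pthread_mutex_unlock(&w->mutex);
            sem_wait(&w->sem);          /* sleep until main posts     */
        }
        return NULL;
    }

    int main(void)
    {
        Worker w = { .quit = false, .num_pages = 0 };
        pthread_t t;

        sem_init(&w.ready, 0, 0);
        sem_init(&w.sem, 0, 0);
        pthread_mutex_init(&w.mutex, NULL);
        pthread_create(&t, NULL, recv_thread, &w);

        sem_wait(&w.ready);             /* wait until the worker is idle */
        pthread_mutex_lock(&w.mutex);
        w.num_pages = 16;               /* hand over one batch of pages  */
        pthread_mutex_unlock(&w.mutex);
        sem_post(&w.sem);               /* wake the worker               */

        sem_wait(&w.ready);             /* wait for it to finish         */
        pthread_mutex_lock(&w.mutex);
        w.quit = true;
        pthread_mutex_unlock(&w.mutex);
        sem_post(&w.sem);               /* wake it so it can see quit    */

        pthread_join(t, NULL);
        return 0;
    }

The extra "ready" semaphore is what lets multifd_recv_page() below block
until a given channel has consumed its previous batch before overwriting
p->pages with the next one.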
Signed-off-by: Juan Quintela <address@hidden>
---
migration/ram.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++-------
1 file changed, 60 insertions(+), 8 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index ac0742f..49c4880 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -49,6 +49,7 @@
#include "migration/colo.h"
#include "sysemu/sysemu.h"
#include "qemu/uuid.h"
+#include "qemu/iov.h"
/***********************************************************/
/* ram save/restore */
@@ -527,7 +528,7 @@ int multifd_save_setup(void)
return 0;
}
-static int multifd_send_page(uint8_t *address)
+static uint16_t multifd_send_page(uint8_t *address, bool last_page)
{
int i, j;
MultiFDSendParams *p = NULL; /* make happy gcc */
@@ -543,8 +544,10 @@ static int multifd_send_page(uint8_t *address)
pages.iov[pages.num].iov_len = TARGET_PAGE_SIZE;
pages.num++;
- if (pages.num < (pages.size - 1)) {
- return UINT16_MAX;
+ if (!last_page) {
+ if (pages.num < (pages.size - 1)) {
+ return UINT16_MAX;
+ }
}
qemu_sem_wait(&multifd_send_state->sem);
@@ -572,12 +575,17 @@ static int multifd_send_page(uint8_t *address)
}
struct MultiFDRecvParams {
+ /* not changed */
uint8_t id;
QemuThread thread;
QIOChannel *c;
+ QemuSemaphore ready;
QemuSemaphore sem;
QemuMutex mutex;
+ /* protected by param mutex */
bool quit;
+ multifd_pages_t pages;
+ bool done;
};
typedef struct MultiFDRecvParams MultiFDRecvParams;
@@ -629,12 +637,20 @@ static void *multifd_recv_thread(void *opaque)
{
MultiFDRecvParams *p = opaque;
+ qemu_sem_post(&p->ready);
while (true) {
qemu_mutex_lock(&p->mutex);
if (p->quit) {
qemu_mutex_unlock(&p->mutex);
break;
}
+ if (p->pages.num) {
+ p->pages.num = 0;
+ p->done = true;
+ qemu_mutex_unlock(&p->mutex);
+ qemu_sem_post(&p->ready);
+ continue;
+ }
qemu_mutex_unlock(&p->mutex);
qemu_sem_wait(&p->sem);
}
@@ -679,8 +695,11 @@ gboolean multifd_new_channel(QIOChannel *ioc)
}
qemu_mutex_init(&p->mutex);
qemu_sem_init(&p->sem, 0);
+ qemu_sem_init(&p->ready, 0);
p->quit = false;
p->id = id;
+ p->done = false;
+ multifd_init_group(&p->pages);
p->c = ioc;
atomic_set(&multifd_recv_state->params[id], p);
qemu_thread_create(&p->thread, "multifd_recv", multifd_recv_thread, p,
@@ -709,6 +728,42 @@ int multifd_load_setup(void)
return 0;
}
+static void multifd_recv_page(uint8_t *address, uint16_t fd_num)
+{
+ int thread_count;
+ MultiFDRecvParams *p;
+ static multifd_pages_t pages;
+ static bool once;
+
+ if (!once) {
+ multifd_init_group(&pages);
+ once = true;
+ }
+
+ pages.iov[pages.num].iov_base = address;
+ pages.iov[pages.num].iov_len = TARGET_PAGE_SIZE;
+ pages.num++;
+
+ if (fd_num == UINT16_MAX) {
+ return;
+ }
+
+ thread_count = migrate_multifd_threads();
+ assert(fd_num < thread_count);
+ p = multifd_recv_state->params[fd_num];
+
+ qemu_sem_wait(&p->ready);
+
+ qemu_mutex_lock(&p->mutex);
+ p->done = false;
+ iov_copy(p->pages.iov, pages.num, pages.iov, pages.num, 0,
+ iov_size(pages.iov, pages.num));
+ p->pages.num = pages.num;
+ pages.num = 0;
+ qemu_mutex_unlock(&p->mutex);
+ qemu_sem_post(&p->sem);
+}
+
/**
* save_page_header: write page header to wire
*
@@ -1155,7 +1210,7 @@ static int ram_multifd_page(RAMState *rs, PageSearchStatus *pss,
ram_counters.transferred +=
save_page_header(rs, rs->f, block,
offset | RAM_SAVE_FLAG_MULTIFD_PAGE);
- fd_num = multifd_send_page(p);
+ fd_num = multifd_send_page(p, rs->migration_dirty_pages == 1);
qemu_put_be16(rs->f, fd_num);
ram_counters.transferred += 2; /* size of fd_num */
qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
@@ -3020,10 +3075,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
case RAM_SAVE_FLAG_MULTIFD_PAGE:
fd_num = qemu_get_be16(f);
- if (fd_num != 0) {
- /* this is yet an unused variable, changed later */
- fd_num = fd_num;
- }
+ multifd_recv_page(host, fd_num);
qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
break;
--
2.9.4