[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH v11 07/15] migration: Synchronize send threads
From: Juan Quintela
Subject: [Qemu-devel] [PATCH v11 07/15] migration: Synchronize send threads
Date: Fri, 16 Mar 2018 12:53:55 +0100
We synchronize all threads at each RAM_SAVE_FLAG_EOS. Bitmap
synchronizations don't happen inside a ram section, so we are safe
from two channels trying to overwrite the same memory.
Signed-off-by: Juan Quintela <address@hidden>
---
migration/ram.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++-
migration/trace-events | 3 +++
2 files changed, 54 insertions(+), 1 deletion(-)
diff --git a/migration/ram.c b/migration/ram.c
index 7e60fc82a6..6aeb63f6ef 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -403,6 +403,7 @@ struct MultiFDSendParams {
QemuMutex mutex;
bool running;
bool quit;
+ bool sync;
};
typedef struct MultiFDSendParams MultiFDSendParams;
@@ -410,6 +411,8 @@ struct {
MultiFDSendParams *params;
/* number of created threads */
int count;
+ /* syncs main thread and channels */
+ QemuSemaphore sem_main;
} *multifd_send_state;
static void multifd_send_terminate_threads(Error *errp)
@@ -456,6 +459,7 @@ int multifd_save_cleanup(Error **errp)
g_free(p->name);
p->name = NULL;
}
+ qemu_sem_destroy(&multifd_send_state->sem_main);
g_free(multifd_send_state->params);
multifd_send_state->params = NULL;
g_free(multifd_send_state);
@@ -463,19 +467,59 @@ int multifd_save_cleanup(Error **errp)
return ret;
}
+static void multifd_send_sync_main(void)
+{
+ int i;
+
+ if (!migrate_use_multifd()) {
+ return;
+ }
+ for (i = 0; i < migrate_multifd_channels(); i++) {
+ MultiFDSendParams *p = &multifd_send_state->params[i];
+
+ trace_multifd_send_sync_signal(p->id, p->quit, p->running);
+
+ qemu_mutex_lock(&p->mutex);
+ p->sync = true;
+ qemu_mutex_unlock(&p->mutex);
+ qemu_sem_post(&p->sem);
+ }
+ for (i = 0; i < migrate_multifd_channels(); i++) {
+ MultiFDSendParams *p = &multifd_send_state->params[i];
+ bool wait;
+
+ trace_multifd_send_sync_wait(p->id, p->quit, p->running);
+
+ qemu_mutex_lock(&p->mutex);
+ wait = p->running;
+ qemu_mutex_unlock(&p->mutex);
+
+ if (wait) {
+ qemu_sem_wait(&multifd_send_state->sem_main);
+ }
+ }
+ trace_multifd_send_sync_main();
+}
+
static void *multifd_send_thread(void *opaque)
{
MultiFDSendParams *p = opaque;
while (true) {
+ qemu_sem_wait(&p->sem);
qemu_mutex_lock(&p->mutex);
+ if (p->sync) {
+ p->sync = false;
+ qemu_mutex_unlock(&p->mutex);
+ qemu_sem_post(&multifd_send_state->sem_main);
+ continue;
+ }
if (p->quit) {
p->running = false;
qemu_mutex_unlock(&p->mutex);
break;
}
qemu_mutex_unlock(&p->mutex);
- qemu_sem_wait(&p->sem);
}
return NULL;
@@ -493,6 +537,8 @@ int multifd_save_setup(void)
multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
atomic_set(&multifd_send_state->count, 0);
+ qemu_sem_init(&multifd_send_state->sem_main, 0);
+
for (i = 0; i < thread_count; i++) {
MultiFDSendParams *p = &multifd_send_state->params[i];
@@ -507,6 +553,7 @@ int multifd_save_setup(void)
atomic_inc(&multifd_send_state->count);
}
+
return 0;
}
@@ -2283,6 +2330,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
ram_control_before_iterate(f, RAM_CONTROL_SETUP);
ram_control_after_iterate(f, RAM_CONTROL_SETUP);
+ multifd_send_sync_main();
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
return 0;
@@ -2358,6 +2406,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
*/
ram_control_after_iterate(f, RAM_CONTROL_ROUND);
+ multifd_send_sync_main();
out:
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
ram_counters.transferred += 8;
@@ -2411,6 +2460,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
rcu_read_unlock();
+ multifd_send_sync_main();
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
return 0;
diff --git a/migration/trace-events b/migration/trace-events
index 93961dea16..845612c177 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -77,6 +77,9 @@ ram_load_postcopy_loop(uint64_t addr, int flags) "@%" PRIx64
" %x"
ram_postcopy_send_discard_bitmap(void) ""
ram_save_page(const char *rbname, uint64_t offset, void *host) "%s: offset:
0x%" PRIx64 " host: %p"
ram_save_queue_pages(const char *rbname, size_t start, size_t len) "%s: start:
0x%zx len: 0x%zx"
+multifd_send_sync_main(void) ""
+multifd_send_sync_signal(uint8_t id, bool quit, bool running) "channel %d quit
%d running %d"
+multifd_send_sync_wait(uint8_t id, bool quit, bool running) "channel %d quit
%d running %d"
# migration/migration.c
await_return_path_close_on_source_close(void) ""
--
2.14.3
- Re: [Qemu-devel] [PATCH v11 06/15] migration: Export functions to create send channels, (continued)
- [Qemu-devel] [PATCH v11 01/15] migration: Set error state in case of error, Juan Quintela, 2018/03/16
- [Qemu-devel] [PATCH v11 08/15] migration: Synchronize recv threads, Juan Quintela, 2018/03/16
- [Qemu-devel] [PATCH v11 09/15] migration: Add multifd traces for start/end thread, Juan Quintela, 2018/03/16
- [Qemu-devel] [PATCH v11 04/15] migration: Introduce multifd_recv_new_channel(), Juan Quintela, 2018/03/16
- [Qemu-devel] [PATCH v11 03/15] migration: terminate_* can be called for other threads, Juan Quintela, 2018/03/16
- [Qemu-devel] [PATCH v11 07/15] migration: Synchronize send threads,
Juan Quintela <=
- [Qemu-devel] [PATCH v11 11/15] migration: Delay start of migration main routines, Juan Quintela, 2018/03/16
- [Qemu-devel] [PATCH v11 12/15] migration: Transmit initial package through the multifd channels, Juan Quintela, 2018/03/16
- [Qemu-devel] [PATCH v11 10/15] migration: Create multifd channels, Juan Quintela, 2018/03/16
- [Qemu-devel] [PATCH v11 13/15] migration: Create ram_multifd_page, Juan Quintela, 2018/03/16
- [Qemu-devel] [PATCH v11 14/15] migration: Create pages structure for reception, Juan Quintela, 2018/03/16
- [Qemu-devel] [PATCH v11 15/15] [RFC] migration: Send pages through the multifd channels, Juan Quintela, 2018/03/16
- Re: [Qemu-devel] [RFC v11 00/15] mutifd, Daniel P . Berrangé, 2018/03/16