[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [PATCH v2 2/5] migration: create a dedicated connection
From: |
Dr. David Alan Gilbert |
Subject: |
Re: [Qemu-devel] [PATCH v2 2/5] migration: create a dedicated connection for rdma return path |
Date: |
Thu, 26 Apr 2018 17:19:59 +0100 |
User-agent: |
Mutt/1.9.5 (2018-04-13) |
* Lidong Chen (address@hidden) wrote:
> If an RDMA migration is started with postcopy enabled, the source qemu
> establishes a dedicated connection for the return path.
>
> Signed-off-by: Lidong Chen <address@hidden>
> Signed-off-by: Dr. David Alan Gilbert <address@hidden>
Oops, I should have Reviewed-by rather than Signed-off-by, so:
Reviewed-by: Dr. David Alan Gilbert <address@hidden>
> ---
> migration/rdma.c | 94
> ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 91 insertions(+), 3 deletions(-)
>
> diff --git a/migration/rdma.c b/migration/rdma.c
> index a22be43..c745427 100644
> --- a/migration/rdma.c
> +++ b/migration/rdma.c
> @@ -387,6 +387,10 @@ typedef struct RDMAContext {
> uint64_t unregistrations[RDMA_SIGNALED_SEND_MAX];
>
> GHashTable *blockmap;
> +
> + /* the RDMAContext for return path */
> + struct RDMAContext *return_path;
> + bool is_return_path;
> } RDMAContext;
>
> #define TYPE_QIO_CHANNEL_RDMA "qio-channel-rdma"
> @@ -2329,10 +2333,22 @@ static void qemu_rdma_cleanup(RDMAContext *rdma)
> rdma_destroy_id(rdma->cm_id);
> rdma->cm_id = NULL;
> }
> +
> + /* On the destination side, listen_id and channel are shared */
> if (rdma->listen_id) {
> - rdma_destroy_id(rdma->listen_id);
> + if (!rdma->is_return_path) {
> + rdma_destroy_id(rdma->listen_id);
> + }
> rdma->listen_id = NULL;
> +
> + if (rdma->channel) {
> + if (!rdma->is_return_path) {
> + rdma_destroy_event_channel(rdma->channel);
> + }
> + rdma->channel = NULL;
> + }
> }
> +
> if (rdma->channel) {
> rdma_destroy_event_channel(rdma->channel);
> rdma->channel = NULL;
> @@ -2561,6 +2577,25 @@ err_dest_init_create_listen_id:
>
> }
>
> +static void qemu_rdma_return_path_dest_init(RDMAContext *rdma_return_path,
> + RDMAContext *rdma)
> +{
> + int idx;
> +
> + for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
> + rdma_return_path->wr_data[idx].control_len = 0;
> + rdma_return_path->wr_data[idx].control_curr = NULL;
> + }
> +
> + /* the CM channel and CM id are shared */
> + rdma_return_path->channel = rdma->channel;
> + rdma_return_path->listen_id = rdma->listen_id;
> +
> + rdma->return_path = rdma_return_path;
> + rdma_return_path->return_path = rdma;
> + rdma_return_path->is_return_path = true;
> +}
> +
> static void *qemu_rdma_data_init(const char *host_port, Error **errp)
> {
> RDMAContext *rdma = NULL;
> @@ -3018,6 +3053,8 @@ err:
> return ret;
> }
>
> +static void rdma_accept_incoming_migration(void *opaque);
> +
> static int qemu_rdma_accept(RDMAContext *rdma)
> {
> RDMACapabilities cap;
> @@ -3112,7 +3149,14 @@ static int qemu_rdma_accept(RDMAContext *rdma)
> }
> }
>
> - qemu_set_fd_handler(rdma->channel->fd, NULL, NULL, NULL);
> + /* Accept the second connection request for return path */
> + if (migrate_postcopy() && !rdma->is_return_path) {
> + qemu_set_fd_handler(rdma->channel->fd,
> rdma_accept_incoming_migration,
> + NULL,
> + (void *)(intptr_t)rdma->return_path);
> + } else {
> + qemu_set_fd_handler(rdma->channel->fd, NULL, NULL, NULL);
> + }
>
> ret = rdma_accept(rdma->cm_id, &conn_param);
> if (ret) {
> @@ -3693,6 +3737,10 @@ static void rdma_accept_incoming_migration(void
> *opaque)
>
> trace_qemu_rdma_accept_incoming_migration_accepted();
>
> + if (rdma->is_return_path) {
> + return;
> + }
> +
> f = qemu_fopen_rdma(rdma, "rb");
> if (f == NULL) {
> ERROR(errp, "could not qemu_fopen_rdma!");
> @@ -3707,7 +3755,7 @@ static void rdma_accept_incoming_migration(void *opaque)
> void rdma_start_incoming_migration(const char *host_port, Error **errp)
> {
> int ret;
> - RDMAContext *rdma;
> + RDMAContext *rdma, *rdma_return_path;
> Error *local_err = NULL;
>
> trace_rdma_start_incoming_migration();
> @@ -3734,12 +3782,24 @@ void rdma_start_incoming_migration(const char
> *host_port, Error **errp)
>
> trace_rdma_start_incoming_migration_after_rdma_listen();
>
> + /* initialize the RDMAContext for return path */
> + if (migrate_postcopy()) {
> + rdma_return_path = qemu_rdma_data_init(host_port, &local_err);
> +
> + if (rdma_return_path == NULL) {
> + goto err;
> + }
> +
> + qemu_rdma_return_path_dest_init(rdma_return_path, rdma);
> + }
> +
> qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration,
> NULL, (void *)(intptr_t)rdma);
> return;
> err:
> error_propagate(errp, local_err);
> g_free(rdma);
> + g_free(rdma_return_path);
> }
>
> void rdma_start_outgoing_migration(void *opaque,
> @@ -3747,6 +3807,7 @@ void rdma_start_outgoing_migration(void *opaque,
> {
> MigrationState *s = opaque;
> RDMAContext *rdma = qemu_rdma_data_init(host_port, errp);
> + RDMAContext *rdma_return_path = NULL;
> int ret = 0;
>
> if (rdma == NULL) {
> @@ -3767,6 +3828,32 @@ void rdma_start_outgoing_migration(void *opaque,
> goto err;
> }
>
> + /* RDMA postcopy needs a separate queue pair for the return path */
> + if (migrate_postcopy()) {
> + rdma_return_path = qemu_rdma_data_init(host_port, errp);
> +
> + if (rdma_return_path == NULL) {
> + goto err;
> + }
> +
> + ret = qemu_rdma_source_init(rdma_return_path,
> + s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL],
> errp);
> +
> + if (ret) {
> + goto err;
> + }
> +
> + ret = qemu_rdma_connect(rdma_return_path, errp);
> +
> + if (ret) {
> + goto err;
> + }
> +
> + rdma->return_path = rdma_return_path;
> + rdma_return_path->return_path = rdma;
> + rdma_return_path->is_return_path = true;
> + }
> +
> trace_rdma_start_outgoing_migration_after_rdma_connect();
>
> s->to_dst_file = qemu_fopen_rdma(rdma, "wb");
> @@ -3774,4 +3861,5 @@ void rdma_start_outgoing_migration(void *opaque,
> return;
> err:
> g_free(rdma);
> + g_free(rdma_return_path);
> }
> --
> 1.8.3.1
>
--
Dr. David Alan Gilbert / address@hidden / Manchester, UK
- [Qemu-devel] [PATCH v2 0/5] Enable postcopy RDMA live migration, Lidong Chen, 2018/04/25
- [Qemu-devel] [PATCH v2 1/5] migration: disable RDMA WRITE after postcopy started, Lidong Chen, 2018/04/25
- [Qemu-devel] [PATCH v2 2/5] migration: create a dedicated connection for rdma return path, Lidong Chen, 2018/04/25
- Re: [Qemu-devel] [PATCH v2 2/5] migration: create a dedicated connection for rdma return path,
Dr. David Alan Gilbert <=
- [Qemu-devel] [PATCH v2 3/5] migration: remove unnecessary variables len in QIOChannelRDMA, Lidong Chen, 2018/04/25
- [Qemu-devel] [PATCH v2 5/5] migration: Stop rdma yielding during incoming postcopy, Lidong Chen, 2018/04/25
- [Qemu-devel] [PATCH v2 4/5] migration: implement bi-directional RDMA QIOChannel, Lidong Chen, 2018/04/25