[Qemu-devel] [PULL 04/28] Store block name in local blocks structure


From: Juan Quintela
Subject: [Qemu-devel] [PULL 04/28] Store block name in local blocks structure
Date: Tue, 7 Jul 2015 15:08:45 +0200

From: "Dr. David Alan Gilbert" <address@hidden>

In a later patch the block name will be used to match up two views
of the block list.  Keep a copy of the block name with the local block
list.

(At some point it could be argued that it would be best just to let
migration see the innards of RAMBlock and avoid the need to use the
foreach.)

Signed-off-by: Dr. David Alan Gilbert <address@hidden>
Reviewed-by: Michael R. Hines <address@hidden>
Signed-off-by: Juan Quintela <address@hidden>
---
 migration/rdma.c | 35 +++++++++++++++++++++--------------
 trace-events     |  2 +-
 2 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/migration/rdma.c b/migration/rdma.c
index fc6b81c..6fa9601 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -215,17 +215,18 @@ static void network_to_caps(RDMACapabilities *cap)
  * the information. It's small anyway, so a list is overkill.
  */
 typedef struct RDMALocalBlock {
-    uint8_t  *local_host_addr; /* local virtual address */
-    uint64_t remote_host_addr; /* remote virtual address */
-    uint64_t offset;
-    uint64_t length;
-    struct   ibv_mr **pmr;     /* MRs for chunk-level registration */
-    struct   ibv_mr *mr;       /* MR for non-chunk-level registration */
-    uint32_t *remote_keys;     /* rkeys for chunk-level registration */
-    uint32_t remote_rkey;      /* rkeys for non-chunk-level registration */
-    int      index;            /* which block are we */
-    bool     is_ram_block;
-    int      nb_chunks;
+    char          *block_name;
+    uint8_t       *local_host_addr; /* local virtual address */
+    uint64_t       remote_host_addr; /* remote virtual address */
+    uint64_t       offset;
+    uint64_t       length;
+    struct         ibv_mr **pmr;    /* MRs for chunk-level registration */
+    struct         ibv_mr *mr;      /* MR for non-chunk-level registration */
+    uint32_t      *remote_keys;     /* rkeys for chunk-level registration */
+    uint32_t       remote_rkey;     /* rkeys for non-chunk-level registration */
+    int            index;           /* which block are we */
+    bool           is_ram_block;
+    int            nb_chunks;
     unsigned long *transit_bitmap;
     unsigned long *unregister_bitmap;
 } RDMALocalBlock;
@@ -511,7 +512,8 @@ static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block,
     return result;
 }

-static int rdma_add_block(RDMAContext *rdma, void *host_addr,
+static int rdma_add_block(RDMAContext *rdma, const char *block_name,
+                         void *host_addr,
                          ram_addr_t block_offset, uint64_t length)
 {
     RDMALocalBlocks *local = &rdma->local_ram_blocks;
@@ -539,6 +541,7 @@ static int rdma_add_block(RDMAContext *rdma, void *host_addr,

     block = &local->block[local->nb_blocks];

+    block->block_name = g_strdup(block_name);
     block->local_host_addr = host_addr;
     block->offset = block_offset;
     block->length = length;
@@ -554,7 +557,8 @@ static int rdma_add_block(RDMAContext *rdma, void *host_addr,

     g_hash_table_insert(rdma->blockmap, (void *) block_offset, block);

-    trace_rdma_add_block(local->nb_blocks, (uintptr_t) block->local_host_addr,
+    trace_rdma_add_block(block_name, local->nb_blocks,
+                         (uintptr_t) block->local_host_addr,
                          block->offset, block->length,
                          (uintptr_t) (block->local_host_addr + block->length),
                          BITS_TO_LONGS(block->nb_chunks) *
@@ -574,7 +578,7 @@ static int rdma_add_block(RDMAContext *rdma, void *host_addr,
 static int qemu_rdma_init_one_block(const char *block_name, void *host_addr,
     ram_addr_t block_offset, ram_addr_t length, void *opaque)
 {
-    return rdma_add_block(opaque, host_addr, block_offset, length);
+    return rdma_add_block(opaque, block_name, host_addr, block_offset, length);
 }

 /*
@@ -636,6 +640,9 @@ static int rdma_delete_block(RDMAContext *rdma, ram_addr_t block_offset)
     g_free(block->remote_keys);
     block->remote_keys = NULL;

+    g_free(block->block_name);
+    block->block_name = NULL;
+
     for (x = 0; x < local->nb_blocks; x++) {
         g_hash_table_remove(rdma->blockmap, (void *)(uintptr_t)old[x].offset);
     }
diff --git a/trace-events b/trace-events
index 0917df2..b29d8af 100644
--- a/trace-events
+++ b/trace-events
@@ -1458,7 +1458,7 @@ qemu_rdma_write_one_recvregres(int mykey, int theirkey, uint64_t chunk) "Receive
 qemu_rdma_write_one_sendreg(uint64_t chunk, int len, int index, int64_t offset) "Sending registration request chunk %" PRIu64 " for %d bytes, index: %d, offset: %" PRId64
 qemu_rdma_write_one_top(uint64_t chunks, uint64_t size) "Writing %" PRIu64 " chunks, (%" PRIu64 " MB)"
 qemu_rdma_write_one_zero(uint64_t chunk, int len, int index, int64_t offset) "Entire chunk is zero, sending compress: %" PRIu64 " for %d bytes, index: %d, offset: %" PRId64
-rdma_add_block(int block, uint64_t addr, uint64_t offset, uint64_t len, uint64_t end, uint64_t bits, int chunks) "Added Block: %d, addr: %" PRIu64 ", offset: %" PRIu64 " length: %" PRIu64 " end: %" PRIu64 " bits %" PRIu64 " chunks %d"
+rdma_add_block(const char *block_name, int block, uint64_t addr, uint64_t offset, uint64_t len, uint64_t end, uint64_t bits, int chunks) "Added Block: '%s':%d, addr: %" PRIu64 ", offset: %" PRIu64 " length: %" PRIu64 " end: %" PRIu64 " bits %" PRIu64 " chunks %d"
 rdma_delete_block(int block, uint64_t addr, uint64_t offset, uint64_t len, uint64_t end, uint64_t bits, int chunks) "Deleted Block: %d, addr: %" PRIu64 ", offset: %" PRIu64 " length: %" PRIu64 " end: %" PRIu64 " bits %" PRIu64 " chunks %d"
 rdma_start_incoming_migration(void) ""
 rdma_start_incoming_migration_after_dest_init(void) ""
-- 
2.4.3
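
The reason for storing block_name becomes visible later in the series: the
destination's view of the RAM blocks is matched against the source's by name
rather than by offset. As a rough illustration of what such a name-based
lookup can look like, here is a minimal, self-contained C sketch; the helper
rdma_find_block_by_name, the trimmed-down stand-in structures and the sample
block names are all hypothetical and are not part of this patch or of the
QEMU tree.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/* Trimmed-down stand-ins for the RDMALocalBlock/RDMALocalBlocks structures
 * shown in the diff above; only the fields needed for a name lookup are kept. */
typedef struct RDMALocalBlock {
    const char *block_name;   /* copy of the RAMBlock name (g_strdup'd in QEMU) */
    uint64_t    offset;
    uint64_t    length;
} RDMALocalBlock;

typedef struct RDMALocalBlocks {
    int             nb_blocks;
    RDMALocalBlock *block;    /* array of nb_blocks entries */
} RDMALocalBlocks;

/* Hypothetical helper: return the local block whose name matches, or NULL.
 * The real matching code arrives in a later patch of this series. */
static RDMALocalBlock *rdma_find_block_by_name(RDMALocalBlocks *local,
                                               const char *block_name)
{
    int i;

    for (i = 0; i < local->nb_blocks; i++) {
        if (strcmp(local->block[i].block_name, block_name) == 0) {
            return &local->block[i];
        }
    }
    return NULL;
}

int main(void)
{
    RDMALocalBlock blocks[] = {
        { .block_name = "pc.ram",   .offset = 0x0,       .length = 0x8000000 },
        { .block_name = "vga.vram", .offset = 0x8000000, .length = 0x1000000 },
    };
    RDMALocalBlocks local = { .nb_blocks = 2, .block = blocks };

    RDMALocalBlock *b = rdma_find_block_by_name(&local, "vga.vram");
    printf("found %s at offset 0x%" PRIx64 "\n",
           b ? b->block_name : "(none)", b ? b->offset : 0);
    return 0;
}

Keeping the name alongside offset and length means a lookup like this does not
depend on the two sides laying out or ordering their RAM blocks identically,
which appears to be the property the later matching patch relies on.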



