From: Zhang, Yulei
Subject: Re: [Qemu-devel] [RFC PATCH] migration: discard RAMBlocks of type ram_device
Date: Thu, 12 Apr 2018 15:59:24 +0000


> -----Original Message-----
> From: Alex Williamson [mailto:address@hidden]
> Sent: Thursday, April 12, 2018 1:55 AM
> To: Cédric Le Goater <address@hidden>
> Cc: address@hidden; Juan Quintela <address@hidden>; Dr.
> David Alan Gilbert <address@hidden>; David Gibson
> <address@hidden>; Zhang, Yulei <address@hidden>; Tian,
> Kevin <address@hidden>; address@hidden;
> address@hidden; address@hidden; Wang, Zhi A
> <address@hidden>
> Subject: Re: [Qemu-devel] [RFC PATCH] migration: discard RAMBlocks of type
> ram_device
> 
> [cc +folks working on vfio-mdev migration]
> 
> On Wed, 11 Apr 2018 19:20:14 +0200
> Cédric Le Goater <address@hidden> wrote:
> 
> > Here is some context for this strange change request.
> >
> > On the POWER9 processor, the XIVE interrupt controller can control
> > interrupt sources using MMIO to trigger events, to EOI or to turn off
> > the sources. Priority management and interrupt acknowledgment are also
> > controlled by MMIO in the presenter subengine.
> >
> > These MMIO regions are exposed to guests in QEMU with a set of 'ram
> > device' memory mappings, similarly to VFIO, and the VMAs are populated
> > dynamically with the appropriate pages using a fault handler.
> >
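A 'ram device' mapping of this kind is typically created with QEMU's
memory_region_init_ram_device_ptr(). A minimal sketch, where mmio_ptr,
owner, sysmem, gpa and the region name are illustrative placeholders rather
than actual XIVE code:

    /* mmio_ptr: host pointer obtained by mmap()ing the device resource.
     * owner/sysmem/gpa: owning object, container MemoryRegion and guest
     * physical address chosen by the caller (all illustrative). */
    MemoryRegion *mr = g_new0(MemoryRegion, 1);

    memory_region_init_ram_device_ptr(mr, owner, "example-mmio", size, mmio_ptr);
    memory_region_add_subregion(sysmem, gpa, mr);

    /* memory_region_is_ram_device(mr) now returns true, which is what the
     * checks added by this patch test for. */
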
> > But, these regions are an issue for migration. We need to discard the
> > associated RAMBlocks from the RAM state on the source VM and let the
> > destination VM rebuild the memory mappings on the new host in the
> > post_load() operation just before resuming the system.
> >
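The destination-side rebuild described above would hang off the device's
post_load() hook in its VMStateDescription. A minimal sketch, assuming a
hypothetical device type and remap helper (MyMMIODev, my_mmio_dev_remap):

    static int my_mmio_dev_post_load(void *opaque, int version_id)
    {
        MyMMIODev *dev = opaque;

        /* Re-mmap() the device pages and re-populate the ram_device
         * region on the new host before the guest resumes. */
        return my_mmio_dev_remap(dev);
    }

    static const VMStateDescription vmstate_my_mmio_dev = {
        .name = "my-mmio-dev",
        .version_id = 1,
        .minimum_version_id = 1,
        .post_load = my_mmio_dev_post_load, /* runs after all state is loaded */
        .fields = (VMStateField[]) {
            /* ... device fields ... */
            VMSTATE_END_OF_LIST()
        },
    };
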
> > This is the goal of the following proposal. Does it make sense? It
> > seems to be working enough to migrate a running guest but there might
> > be a better, more subtle, approach.
> 
> Yulei, is this something you've run into with GVT-g migration?  I don't see
> how we can read from or write to ram_device regions in a useful way during
> migration anyway, so the change initially looks correct to me.
> Thanks,
> 
> Alex
> 

I haven't run into such an issue before. I think the change will be fine as
long as the vendor driver handles the reconstruction well on the target
side. And I agree with Dave's suggestion: how about having the vendor driver
report a flag for the mapped region to indicate whether it can be registered
as a migratable memory block or not?
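
A rough sketch of that idea, purely illustrative: the vendor driver would
report a per-region flag (an imaginary VFIO_REGION_INFO_FLAG_MIGRATABLE),
QEMU would record it on the RAMBlock when the region is mapped, and the
migration loops would then skip only the non-migratable ram_device blocks
instead of all of them:

    RAMBLOCK_FOREACH(block) {
        /* block_is_vendor_migratable() is a made-up predicate backed by the
         * hypothetical vendor-reported flag. */
        if (memory_region_is_ram_device(block->mr) &&
            !block_is_vendor_migratable(block)) {
            continue;
        }
        /* ... existing per-block work ... */
    }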

Yulei
> > Signed-off-by: Cédric Le Goater <address@hidden>
> > ---
> >  migration/ram.c | 42 ++++++++++++++++++++++++++++++++++++++++--
> >  1 file changed, 40 insertions(+), 2 deletions(-)
> >
> > diff --git a/migration/ram.c b/migration/ram.c
> > index 0e90efa09236..6404ccd046d8 100644
> > --- a/migration/ram.c
> > +++ b/migration/ram.c
> > @@ -780,6 +780,10 @@ unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
> >      unsigned long *bitmap = rb->bmap;
> >      unsigned long next;
> >
> > +    if (memory_region_is_ram_device(rb->mr)) {
> > +        return size;
> > +    }
> > +
> >      if (rs->ram_bulk_stage && start > 0) {
> >          next = start + 1;
> >      } else {
> > @@ -826,6 +830,9 @@ uint64_t ram_pagesize_summary(void)
> >      uint64_t summary = 0;
> >
> >      RAMBLOCK_FOREACH(block) {
> > +        if (memory_region_is_ram_device(block->mr)) {
> > +            continue;
> > +        }
> >          summary |= block->page_size;
> >      }
> >
> > @@ -850,6 +857,9 @@ static void migration_bitmap_sync(RAMState *rs)
> >      qemu_mutex_lock(&rs->bitmap_mutex);
> >      rcu_read_lock();
> >      RAMBLOCK_FOREACH(block) {
> > +        if (memory_region_is_ram_device(block->mr)) {
> > +            continue;
> > +        }
> >          migration_bitmap_sync_range(rs, block, 0, block->used_length);
> >      }
> >      rcu_read_unlock();
> > @@ -1499,6 +1509,10 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
> >      size_t pagesize_bits =
> >          qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
> >
> > +    if (memory_region_is_ram_device(pss->block->mr)) {
> > +        return 0;
> > +    }
> > +
> >      do {
> >          tmppages = ram_save_target_page(rs, pss, last_stage);
> >          if (tmppages < 0) {
> > @@ -1588,6 +1602,9 @@ uint64_t ram_bytes_total(void)
> >
> >      rcu_read_lock();
> >      RAMBLOCK_FOREACH(block) {
> > +        if (memory_region_is_ram_device(block->mr)) {
> > +            continue;
> > +        }
> >          total += block->used_length;
> >      }
> >      rcu_read_unlock();
> > @@ -1643,6 +1660,9 @@ static void ram_save_cleanup(void *opaque)
> >      memory_global_dirty_log_stop();
> >
> >      QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
> > +        if (memory_region_is_ram_device(block->mr)) {
> > +            continue;
> > +        }
> >          g_free(block->bmap);
> >          block->bmap = NULL;
> >          g_free(block->unsentmap);
> > @@ -1710,6 +1730,9 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms)
> >          unsigned long range = block->used_length >> TARGET_PAGE_BITS;
> >          unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
> >
> > +        if (memory_region_is_ram_device(block->mr)) {
> > +            continue;
> > +        }
> >          while (run_start < range) {
> >              unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
> >              ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
> > @@ -1784,8 +1807,13 @@ static int postcopy_each_ram_send_discard(MigrationState *ms)
> >      int ret;
> >
> >      RAMBLOCK_FOREACH(block) {
> > -        PostcopyDiscardState *pds =
> > -            postcopy_discard_send_init(ms, block->idstr);
> > +        PostcopyDiscardState *pds;
> > +
> > +        if (memory_region_is_ram_device(block->mr)) {
> > +            continue;
> > +        }
> > +
> > +        pds = postcopy_discard_send_init(ms, block->idstr);
> >
> >          /*
> >           * Postcopy sends chunks of bitmap over the wire, but it
> > @@ -1996,6 +2024,10 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
> >          unsigned long *bitmap = block->bmap;
> >          unsigned long *unsentmap = block->unsentmap;
> >
> > +        if (memory_region_is_ram_device(block->mr)) {
> > +            continue;
> > +        }
> > +
> >          if (!unsentmap) {
> >              /* We don't have a safe way to resize the sentmap, so
> >               * if the bitmap was resized it will be NULL at this
> > @@ -2151,6 +2183,9 @@ static void ram_list_init_bitmaps(void)
> >      /* Skip setting bitmap if there is no RAM */
> >      if (ram_bytes_total()) {
> >          QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
> > +            if (memory_region_is_ram_device(block->mr)) {
> > +                continue;
> > +            }
> >              pages = block->max_length >> TARGET_PAGE_BITS;
> >              block->bmap = bitmap_new(pages);
> >              bitmap_set(block->bmap, 0, pages);
> > @@ -2227,6 +2262,9 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
> >      qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
> >
> >      RAMBLOCK_FOREACH(block) {
> > +        if (memory_region_is_ram_device(block->mr)) {
> > +            continue;
> > +        }
> >          qemu_put_byte(f, strlen(block->idstr));
> >          qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
> >          qemu_put_be64(f, block->used_length);

