qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH 44/51] ram: reorganize last_sent_block


From: Dr. David Alan Gilbert
Subject: Re: [Qemu-devel] [PATCH 44/51] ram: reorganize last_sent_block
Date: Fri, 31 Mar 2017 09:40:38 +0100
User-agent: Mutt/1.8.0 (2017-02-23)

* Juan Quintela (address@hidden) wrote:
> We were setting it far away from where we changed it.  Now everything is
> done inside save_page_header.  Once there, reorganize code to pass
> RAMState.
> 
> Signed-off-by: Juan Quintela <address@hidden>

Reviewed-by: Dr. David Alan Gilbert <address@hidden>

> ---
>  migration/ram.c | 36 +++++++++++++++---------------------
>  1 file changed, 15 insertions(+), 21 deletions(-)
> 
> diff --git a/migration/ram.c b/migration/ram.c
> index 83c749c..6cd77b5 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -453,18 +453,22 @@ void migrate_compress_threads_create(void)
>   * @offset: offset inside the block for the page
>   *          in the lower bits, it contains flags
>   */
> -static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
> +static size_t save_page_header(RAMState *rs, RAMBlock *block, ram_addr_t offset)
>  {
>      size_t size, len;
>  
> -    qemu_put_be64(f, offset);
> +    if (block == rs->last_sent_block) {
> +        offset |= RAM_SAVE_FLAG_CONTINUE;
> +    }
> +    qemu_put_be64(rs->f, offset);
>      size = 8;
>  
>      if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
>          len = strlen(block->idstr);
> -        qemu_put_byte(f, len);
> -        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
> +        qemu_put_byte(rs->f, len);
> +        qemu_put_buffer(rs->f, (uint8_t *)block->idstr, len);
>          size += 1 + len;
> +        rs->last_sent_block = block;
>      }
>      return size;
>  }
> @@ -584,7 +588,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
>      }
>  
>      /* Send XBZRLE based compressed page */
> -    bytes_xbzrle = save_page_header(rs->f, block,
> +    bytes_xbzrle = save_page_header(rs, block,
>                                      offset | RAM_SAVE_FLAG_XBZRLE);
>      qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
>      qemu_put_be16(rs->f, encoded_len);
> @@ -769,7 +773,7 @@ static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
>      if (is_zero_range(p, TARGET_PAGE_SIZE)) {
>          rs->zero_pages++;
>          rs->bytes_transferred +=
> -            save_page_header(rs->f, block, offset | RAM_SAVE_FLAG_COMPRESS);
> +            save_page_header(rs, block, offset | RAM_SAVE_FLAG_COMPRESS);
>          qemu_put_byte(rs->f, 0);
>          rs->bytes_transferred += 1;
>          pages = 1;
> @@ -826,9 +830,6 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
>  
>      current_addr = block->offset + offset;
>  
> -    if (block == rs->last_sent_block) {
> -        offset |= RAM_SAVE_FLAG_CONTINUE;
> -    }
>      if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
>          if (ret != RAM_SAVE_CONTROL_DELAYED) {
>              if (bytes_xmit > 0) {
> @@ -860,8 +861,8 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
>  
>      /* XBZRLE overflow or normal page */
>      if (pages == -1) {
> -        rs->bytes_transferred += save_page_header(rs->f, block,
> -                                               offset | RAM_SAVE_FLAG_PAGE);
> +        rs->bytes_transferred += save_page_header(rs, block,
> +                                                  offset | RAM_SAVE_FLAG_PAGE);
>          if (send_async) {
>              qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE,
>                                    migrate_release_ram() &
> @@ -882,10 +883,11 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
>  static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
>                                  ram_addr_t offset)
>  {
> +    RAMState *rs = &ram_state;
>      int bytes_sent, blen;
>      uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
>  
> -    bytes_sent = save_page_header(f, block, offset |
> +    bytes_sent = save_page_header(rs, block, offset |
>                                    RAM_SAVE_FLAG_COMPRESS_PAGE);
>      blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
>                                       migrate_compress_level());
> @@ -1016,7 +1018,7 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
>              pages = save_zero_page(rs, block, offset, p);
>              if (pages == -1) {
>                  /* Make sure the first page is sent out before other pages */
> -                bytes_xmit = save_page_header(rs->f, block, offset |
> +                bytes_xmit = save_page_header(rs, block, offset |
>                                                RAM_SAVE_FLAG_COMPRESS_PAGE);
>                  blen = qemu_put_compression_data(rs->f, p, TARGET_PAGE_SIZE,
>                                                   migrate_compress_level());
> @@ -1033,7 +1035,6 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
>                  ram_release_pages(block->idstr, pss->offset, pages);
>              }
>          } else {
> -            offset |= RAM_SAVE_FLAG_CONTINUE;
>              pages = save_zero_page(rs, block, offset, p);
>              if (pages == -1) {
>                  pages = compress_page_with_multi_thread(rs, block, offset);
> @@ -1330,13 +1331,6 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
>          if (unsentmap) {
>              clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
>          }
> -        /* Only update last_sent_block if a block was actually sent; xbzrle
> -         * might have decided the page was identical so didn't bother writing
> -         * to the stream.
> -         */
> -        if (res > 0) {
> -            rs->last_sent_block = pss->block;
> -        }
>      }
>  
>      return res;
> -- 
> 2.9.3
> 
--
Dr. David Alan Gilbert / address@hidden / Manchester, UK



reply via email to

[Prev in Thread] Current Thread [Next in Thread]