From: Benoît Canet
Subject: Re: [Qemu-devel] [PATCH v5 08/11] qcow2: Rebuild refcount structure during check
Date: Fri, 10 Oct 2014 12:44:39 +0000
User-agent: Mutt/1.5.21 (2010-09-15)

> +        *nb_clusters = cluster + cluster_count - contiguous_free_clusters;
> +        *refcount_table = g_try_realloc(*refcount_table,
> +                                        *nb_clusters * sizeof(uint16_t));

Something tells me that these sizeof(uint16_t) are connected to s->refcount_order
and indirectly to REFCOUNT_SHIFT, and that this code could benefit from making
that relationship explicit, probably saving you work in the future.
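
Something along these lines, for instance (an untested sketch; "refcount_bytes"
is just a name I made up for illustration, and I'm assuming s->refcount_order
is the usual bit-width order, i.e. 4 for the current 16-bit entries):

    /* untested: derive the entry size instead of hardcoding uint16_t */
    size_t refcount_bytes = 1 << (s->refcount_order - 3);

    *nb_clusters = cluster + cluster_count - contiguous_free_clusters;
    *refcount_table = g_try_realloc(*refcount_table,
                                    *nb_clusters * refcount_bytes);

(Or, if I read REFCOUNT_SHIFT right, *nb_clusters << REFCOUNT_SHIFT would do
the same with today's 2-byte entries, keeping the size in one place.)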


> +        if (!*refcount_table) {
> +            return -ENOMEM;
> +        }
> +
> +        memset(*refcount_table + old_nb_clusters, 0,
> +               (*nb_clusters - old_nb_clusters) * sizeof(uint16_t));
> +    }
> +
> +    /* Go back to the first free cluster */
> +    cluster -= contiguous_free_clusters;
> +    for (i = 0; i < cluster_count; i++) {
> +        (*refcount_table)[cluster + i] = 1;
> +    }
> +
> +    return cluster << s->cluster_bits;
> +}
> +
> +/*
> + * Creates a new refcount structure based solely on the in-memory information
> + * given through *refcount_table. All necessary allocations will be reflected
> + * in that array.
> + *
> + * On success, the old refcount structure is leaked (it will be covered by the
> + * new refcount structure).
> + */
> +static int rebuild_refcount_structure(BlockDriverState *bs,
> +                                      BdrvCheckResult *res,
> +                                      uint16_t **refcount_table,
> +                                      int64_t *nb_clusters)
> +{
> +    BDRVQcowState *s = bs->opaque;
> +    int64_t first_free_cluster = 0, rt_ofs = -1, cluster = 0;
> +    int64_t rb_ofs, rb_start, rb_index;
> +    uint32_t reftable_size = 0;
> +    uint64_t *reftable = NULL;
> +    uint16_t *on_disk_rb;
> +    int i, ret = 0;
> +    struct {
> +        uint64_t rt_offset;
> +        uint32_t rt_clusters;
> +    } QEMU_PACKED rt_offset_and_clusters;
> +
> +    qcow2_cache_empty(bs, s->refcount_block_cache);
> +
> +write_refblocks:
> +    for (; cluster < *nb_clusters; cluster++) {
> +        if (!(*refcount_table)[cluster]) {
> +            continue;
> +        }
> +
> +        rb_index = cluster >> s->refcount_block_bits;
> +        rb_start = rb_index << s->refcount_block_bits;
> +
> +        /* Don't allocate a cluster in a refblock already written to disk */
> +        if (first_free_cluster < rb_start) {
> +            first_free_cluster = rb_start;
> +        }
> +        rb_ofs = alloc_clusters_imrt(bs, 1, refcount_table, nb_clusters,
> +                                     &first_free_cluster);
> +        if (rb_ofs < 0) {
> +            fprintf(stderr, "ERROR allocating refblock: %s\n", strerror(-ret));
> +            res->check_errors++;
> +            ret = rb_ofs;
> +            goto fail;
> +        }
> +
> +        if (reftable_size <= rb_index) {
> +            uint32_t old_rt_size = reftable_size;
> +            reftable_size = ROUND_UP((rb_index + 1) * sizeof(uint64_t),
> +                                     s->cluster_size) / sizeof(uint64_t);
> +            reftable = g_try_realloc(reftable,
> +                                     reftable_size * sizeof(uint64_t));
> +            if (!reftable) {
> +                res->check_errors++;
> +                ret = -ENOMEM;
> +                goto fail;
> +            }
> +
> +            memset(reftable + old_rt_size, 0,
> +                   (reftable_size - old_rt_size) * sizeof(uint64_t));
> +
> +            /* The offset we have for the reftable is now no longer valid;
> +             * this will leak that range, but we can easily fix that by running
> +             * a leak-fixing check after this rebuild operation */
> +            rt_ofs = -1;
> +        }
> +        reftable[rb_index] = rb_ofs;
> +
> +        /* If this is apparently the last refblock (for now), try to squeeze the
> +         * reftable in */
> +        if (rb_index == (*nb_clusters - 1) >> s->refcount_block_bits &&
> +            rt_ofs < 0)
> +        {
> +            rt_ofs = alloc_clusters_imrt(bs, size_to_clusters(s, reftable_size *
> +                                                              sizeof(uint64_t)),
> +                                         refcount_table, nb_clusters,
> +                                         &first_free_cluster);
> +            if (rt_ofs < 0) {
> +                fprintf(stderr, "ERROR allocating reftable: %s\n",
> +                        strerror(-ret));
> +                res->check_errors++;
> +                ret = rt_ofs;
> +                goto fail;
> +            }
> +        }
> +
> +        ret = qcow2_pre_write_overlap_check(bs, 0, rb_ofs, s->cluster_size);
> +        if (ret < 0) {
> +            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
> +            goto fail;
> +        }
> +
> +        on_disk_rb = g_malloc0(s->cluster_size);
> +        for (i = 0; i < s->cluster_size / sizeof(uint16_t) &&
> +                    rb_start + i < *nb_clusters; i++)
> +        {
> +            on_disk_rb[i] = cpu_to_be16((*refcount_table)[rb_start + i]);
> +        }
> +
> +        ret = bdrv_write(bs->file, rb_ofs / BDRV_SECTOR_SIZE,
> +                         (void *)on_disk_rb, s->cluster_sectors);
> +        g_free(on_disk_rb);
> +        if (ret < 0) {
> +            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
> +            goto fail;
> +        }
> +
> +        /* Go to the end of this refblock */
> +        cluster = rb_start + s->cluster_size / sizeof(uint16_t) - 1;
> +    }
> +
> +    if (rt_ofs < 0) {
> +        int64_t post_rb_start = ROUND_UP(*nb_clusters,
> +                                         s->cluster_size / sizeof(uint16_t));
> +
> +        /* Not pretty but simple */
> +        if (first_free_cluster < post_rb_start) {
> +            first_free_cluster = post_rb_start;
> +        }
> +        rt_ofs = alloc_clusters_imrt(bs, size_to_clusters(s, reftable_size *
> +                                                          sizeof(uint64_t)),
> +                                     refcount_table, nb_clusters,
> +                                     &first_free_cluster);
> +        if (rt_ofs < 0) {
> +            fprintf(stderr, "ERROR allocating reftable: %s\n", strerror(-ret));
> +            res->check_errors++;
> +            ret = rt_ofs;
> +            goto fail;
> +        }
> +
> +        goto write_refblocks;
> +    }
> +
> +    assert(reftable);
> +
> +    for (rb_index = 0; rb_index < reftable_size; rb_index++) {
> +        cpu_to_be64s(&reftable[rb_index]);
> +    }
> +
> +    ret = qcow2_pre_write_overlap_check(bs, 0, rt_ofs,
> +                                        reftable_size * sizeof(uint64_t));
> +    if (ret < 0) {
> +        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
> +        goto fail;
> +    }
> +
> +    ret = bdrv_write(bs->file, rt_ofs / BDRV_SECTOR_SIZE, (void *)reftable,
> +                     reftable_size * sizeof(uint64_t) / BDRV_SECTOR_SIZE);
> +    if (ret < 0) {
> +        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
> +        goto fail;
> +    }
> +
> +    /* Enter new reftable into the image header */
> +    cpu_to_be64w(&rt_offset_and_clusters.rt_offset, rt_ofs);
> +    cpu_to_be32w(&rt_offset_and_clusters.rt_clusters,
> +                 size_to_clusters(s, reftable_size * sizeof(uint64_t)));
> +    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader,
> +                                              refcount_table_offset),
> +                           &rt_offset_and_clusters,
> +                           sizeof(rt_offset_and_clusters));
> +    if (ret < 0) {
> +        fprintf(stderr, "ERROR setting reftable: %s\n", strerror(-ret));
> +        goto fail;
> +    }
> +
> +    for (rb_index = 0; rb_index < reftable_size; rb_index++) {
> +        be64_to_cpus(&reftable[rb_index]);
> +    }
> +    s->refcount_table = reftable;
> +    s->refcount_table_offset = rt_ofs;
> +    s->refcount_table_size = reftable_size;
> +
> +    return 0;
> +
> +fail:
> +    g_free(reftable);
> +    return ret;
> +}
> +
> +/*
>   * Checks an image for refcount consistency.
>   *
> + * Returns 0 if no errors are found, the number of errors in case the image is
> @@ -1612,6 +1872,7 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
>                            BdrvCheckMode fix)
>  {
>      BDRVQcowState *s = bs->opaque;
> +    BdrvCheckResult pre_compare_res;
>      int64_t size, highest_cluster, nb_clusters;
>      uint16_t *refcount_table = NULL;
>      bool rebuild = false;
> @@ -1638,11 +1899,30 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
>          goto fail;
>      }
>  
> -    compare_refcounts(bs, res, fix, &rebuild, &highest_cluster, refcount_table,
> +    /* In case we don't need to rebuild the refcount structure (but want to fix
> +     * something), this function is immediately called again, in which case the
> +     * result should be ignored */
> +    pre_compare_res = *res;
> +    compare_refcounts(bs, res, 0, &rebuild, &highest_cluster, refcount_table,
>                        nb_clusters);
>  
> -    if (rebuild) {
> -        fprintf(stderr, "ERROR need to rebuild refcount structures\n");
> +    if (rebuild && (fix & BDRV_FIX_ERRORS)) {
> +        fprintf(stderr, "Rebuilding refcount structure\n");
> +        ret = rebuild_refcount_structure(bs, res, &refcount_table,
> +                                         &nb_clusters);
> +        if (ret < 0) {
> +            goto fail;
> +        }
> +    } else if (fix) {
> +        if (rebuild) {
> +            fprintf(stderr, "ERROR need to rebuild refcount structures\n");
> +        }
> +
> +        if (res->leaks || res->corruptions) {
> +            *res = pre_compare_res;
> +            compare_refcounts(bs, res, fix, &rebuild, &highest_cluster,
> +                              refcount_table, nb_clusters);
> +        }
>      }
>  
>      /* check OFLAG_COPIED */
> -- 
> 2.1.0
> 


