From: John Snow
Subject: Re: [Qemu-devel] [PATCH 06/13] HBitmap: Introduce "meta" bitmap to track bit changes
Date: Tue, 5 Jan 2016 19:09:32 -0500
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Thunderbird/38.3.0
On 01/04/2016 05:27 AM, Fam Zheng wrote:
> Upon each bit toggle, the corresponding bit in the meta bitmap will be
> set.
>
> Signed-off-by: Fam Zheng <address@hidden>
> ---
> include/qemu/hbitmap.h | 8 +++++++
> util/hbitmap.c | 61 +++++++++++++++++++++++++++++++++++++-------------
> 2 files changed, 54 insertions(+), 15 deletions(-)
>
> diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
> index bb94a00..ed672e7 100644
> --- a/include/qemu/hbitmap.h
> +++ b/include/qemu/hbitmap.h
> @@ -181,6 +181,14 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first);
> */
> unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
>
> +/* hbitmap_create_meta
> + * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
> + *
> + * @hb: The HBitmap to operate on.
> + * @chunk_size: How many bits in @hb does one bit in the meta track.
> + */
> +HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size);
> +
> /**
> * hbitmap_iter_next:
> * @hbi: HBitmapIter to operate on.
> diff --git a/util/hbitmap.c b/util/hbitmap.c
> index 50b888f..55d3182 100644
> --- a/util/hbitmap.c
> +++ b/util/hbitmap.c
> @@ -81,6 +81,9 @@ struct HBitmap {
> */
> int granularity;
>
> + /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
> + HBitmap *meta;
> +
> /* A number of progressively less coarse bitmaps (i.e. level 0 is the
> * coarsest). Each bit in level N represents a word in level N+1 that
> * has a set bit, except the last level where each bit represents the
> @@ -212,25 +215,27 @@ static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
> }
>
> /* Setting starts at the last layer and propagates up if an element
> - * changes from zero to non-zero.
> + * changes.
> */
Isn't this comment wrong anyway? hb_set_elem does not propagate upward
by itself; the upward propagation happens in hb_set_between.
> static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
> {
> unsigned long mask;
> - bool changed;
> + unsigned long old;
>
> assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
> assert(start <= last);
>
> mask = 2UL << (last & (BITS_PER_LONG - 1));
> mask -= 1UL << (start & (BITS_PER_LONG - 1));
> - changed = (*elem == 0);
> + old = *elem;
> *elem |= mask;
> - return changed;
> + return old != *elem;
> }
>
> -/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)... */
> -static void hb_set_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
> +/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
> + * Returns true if at least one bit is changed. */
> +static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
> + uint64_t last)
> {
> size_t pos = start >> BITS_PER_LEVEL;
> size_t lastpos = last >> BITS_PER_LEVEL;
> @@ -259,22 +264,27 @@ static void hb_set_between(HBitmap *hb, int level, uint64_t start, uint64_t last
> if (level > 0 && changed) {
> hb_set_between(hb, level - 1, pos, lastpos);
> }
> + return changed;
> }
>
> void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
> {
> /* Compute range in the last layer. */
> + uint64_t first, n;
> uint64_t last = start + count - 1;
>
> trace_hbitmap_set(hb, start, count,
> start >> hb->granularity, last >> hb->granularity);
>
> - start >>= hb->granularity;
> + first = start >> hb->granularity;
> last >>= hb->granularity;
> - count = last - start + 1;
> + n = last - first + 1;
>
> - hb->count += count - hb_count_between(hb, start, last);
> - hb_set_between(hb, HBITMAP_LEVELS - 1, start, last);
> + hb->count += n - hb_count_between(hb, first, last);
> + if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
> + hb->meta) {
> + hbitmap_set(hb->meta, start, count);
> + }
> }
>
> /* Resetting works the other way round: propagate up if the new
> @@ -295,8 +305,10 @@ static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t l
> return blanked;
> }
>
> -/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)... */
> -static void hb_reset_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
> +/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
> + * Returns true if at least one bit is changed. */
> +static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
> + uint64_t last)
> {
> size_t pos = start >> BITS_PER_LEVEL;
> size_t lastpos = last >> BITS_PER_LEVEL;
> @@ -339,21 +351,28 @@ static void hb_reset_between(HBitmap *hb, int level, uint64_t start, uint64_t la
> if (level > 0 && changed) {
> hb_reset_between(hb, level - 1, pos, lastpos);
> }
> +
> + return changed;
> +
> }
>
> void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
> {
> /* Compute range in the last layer. */
> + uint64_t first;
> uint64_t last = start + count - 1;
>
> trace_hbitmap_reset(hb, start, count,
> start >> hb->granularity, last >> hb->granularity);
>
> - start >>= hb->granularity;
> + first = start >> hb->granularity;
> last >>= hb->granularity;
>
> - hb->count -= hb_count_between(hb, start, last);
> - hb_reset_between(hb, HBITMAP_LEVELS - 1, start, last);
> + hb->count -= hb_count_between(hb, first, last);
> + if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
> + hb->meta) {
> + hbitmap_set(hb->meta, start, count);
> + }
> }
>
> void hbitmap_reset_all(HBitmap *hb)
> @@ -384,6 +403,9 @@ void hbitmap_free(HBitmap *hb)
> for (i = HBITMAP_LEVELS; i-- > 0; ) {
> g_free(hb->levels[i]);
> }
> + if (hb->meta) {
> + hbitmap_free(hb->meta);
> + }
> g_free(hb);
> }
>
> @@ -493,3 +515,12 @@ bool hbitmap_merge(HBitmap *a, const HBitmap *b)
>
> return true;
> }
> +
> +HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size)
> +{
> + assert(!(chunk_size & (chunk_size - 1)));
> + assert(!hb->meta);
> + hb->meta = hbitmap_alloc(hb->size << hb->granularity,
> + hb->granularity + ctz32(chunk_size));
> + return hb->meta;
> +}
>
I am a little skeptical of returning handles to internal state, but it's
the easiest way to re-use all of the existing HBitmap infrastructure to
iterate over the meta bitmap, so I guess this is fine.
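For what it's worth, a minimal sketch of the consumer I have in mind,
reusing the existing iterator API (the chunk_size of 16, the set call,
and the final reset are illustrative assumptions, not part of this
patch):

    HBitmap *meta = hbitmap_create_meta(hb, 16);
    HBitmapIter hbi;
    int64_t pos;

    /* set/reset on @hb now also dirty @meta when bits actually change */
    hbitmap_set(hb, 0, 64);

    /* walk the dirty chunks with the existing iterator */
    hbitmap_iter_init(&hbi, meta, 0);
    while ((pos = hbitmap_iter_next(&hbi)) >= 0) {
        /* @pos is the start of a dirty chunk, in the same units as
         * the @start passed to hbitmap_set() on @hb */
    }
    hbitmap_reset_all(meta);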
Should we also add an hbitmap_destroy_meta for when we're done with it?
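Something along these lines would do, I think (the name and the
NULL-ing of the pointer are just my suggestion, mirroring
hbitmap_create_meta):

    /* hypothetical counterpart to hbitmap_create_meta(); not in this patch */
    void hbitmap_free_meta(HBitmap *hb)
    {
        assert(hb->meta);
        hbitmap_free(hb->meta);
        hb->meta = NULL;
    }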
Regardless:
Reviewed-by: John Snow <address@hidden>