[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [patch 1/5][v2] Extract code from get_cluster_offset()
From: |
Laurent Vivier |
Subject: |
[Qemu-devel] [patch 1/5][v2] Extract code from get_cluster_offset() |
Date: |
Tue, 29 Jul 2008 16:13:53 +0200 |
User-agent: |
quilt/0.45-1 |
Extract code from get_cluster_offset() into new functions:
- seek_l2_table()
Search for an L2 offset in the l2_cache table.
- l2_load()
Read the L2 entry from disk.
- l2_allocate()
Allocate a new l2 entry.
Signed-off-by: Laurent Vivier <address@hidden>
---
block-qcow2.c | 176 +++++++++++++++++++++++++++++++++++-----------------------
1 file changed, 109 insertions(+), 67 deletions(-)
Index: qemu/block-qcow2.c
===================================================================
--- qemu.orig/block-qcow2.c 2008-07-29 15:12:37.000000000 +0200
+++ qemu/block-qcow2.c 2008-07-29 15:22:04.000000000 +0200
@@ -480,96 +480,138 @@ static int grow_l1_table(BlockDriverStat
return -EIO;
}
-/* 'allocate' is:
- *
- * 0 not to allocate.
- *
- * 1 to allocate a normal cluster (for sector indexes 'n_start' to
- * 'n_end')
- *
- * 2 to allocate a compressed cluster of size
- * 'compressed_size'. 'compressed_size' must be > 0 and <
- * cluster_size
- *
- * return 0 if not allocated.
- */
+static uint64_t *seek_l2_table(BDRVQcowState *s, uint64_t l2_offset)
+{
+ int i,j;
+
+ for(i = 0; i < L2_CACHE_SIZE; i++) {
+ if (l2_offset == s->l2_cache_offsets[i]) {
+ /* increment the hit count */
+ if (++s->l2_cache_counts[i] == 0xffffffff) {
+ for(j = 0; j < L2_CACHE_SIZE; j++) {
+ s->l2_cache_counts[j] >>= 1;
+ }
+ }
+ return s->l2_cache + (i << s->l2_bits);
+ }
+ }
+ return NULL;
+}
+
+static int l2_load(BlockDriverState *bs, int l1_index,
+ uint64_t **new_l2_table, uint64_t *new_l2_offset)
+{
+ BDRVQcowState *s = bs->opaque;
+ int min_index;
+ uint64_t *l2_table, l2_offset;
+
+ l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
+ l2_table = seek_l2_table(s, l2_offset);
+ if (l2_table != NULL)
+ goto found;
+
+ /* not found: load a new entry in the least used one */
+
+ min_index = l2_cache_new_entry(bs);
+ l2_table = s->l2_cache + (min_index << s->l2_bits);
+    if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=
+        s->l2_size * sizeof(uint64_t))
+ return 0;
+ s->l2_cache_offsets[min_index] = l2_offset;
+ s->l2_cache_counts[min_index] = 1;
+
+found:
+ *new_l2_table = l2_table;
+ *new_l2_offset = l2_offset;
+
+ return 1;
+}
+
+static int l2_allocate(BlockDriverState *bs, int l1_index,
+ uint64_t **new_l2_table, uint64_t *new_l2_offset)
+{
+ BDRVQcowState *s = bs->opaque;
+ int min_index;
+ uint64_t old_l2_offset, tmp;
+ uint64_t *l2_table, l2_offset;
+
+ old_l2_offset = s->l1_table[l1_index];
+
+ /* allocate a new l2 entry */
+
+ l2_offset = alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
+
+ /* update the L1 entry */
+
+ s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
+
+ tmp = cpu_to_be64(l2_offset | QCOW_OFLAG_COPIED);
+ if (bdrv_pwrite(s->hd, s->l1_table_offset + l1_index * sizeof(tmp),
+ &tmp, sizeof(tmp)) != sizeof(tmp))
+ return 0;
+
+ min_index = l2_cache_new_entry(bs);
+ l2_table = s->l2_cache + (min_index << s->l2_bits);
+
+ if (old_l2_offset == 0) {
+ memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
+ } else {
+ if (bdrv_pread(s->hd, old_l2_offset,
+ l2_table, s->l2_size * sizeof(uint64_t)) !=
+ s->l2_size * sizeof(uint64_t))
+ return 0;
+ }
+ if (bdrv_pwrite(s->hd, l2_offset,
+ l2_table, s->l2_size * sizeof(uint64_t)) !=
+ s->l2_size * sizeof(uint64_t))
+ return 0;
+
+ s->l2_cache_offsets[min_index] = l2_offset;
+ s->l2_cache_counts[min_index] = 1;
+
+ *new_l2_table = l2_table;
+ *new_l2_offset = l2_offset;
+
+ return 1;
+}
+
static uint64_t get_cluster_offset(BlockDriverState *bs,
uint64_t offset, int allocate,
int compressed_size,
int n_start, int n_end)
{
BDRVQcowState *s = bs->opaque;
- int min_index, i, j, l1_index, l2_index, ret;
- uint64_t l2_offset, *l2_table, cluster_offset, tmp, old_l2_offset;
+ int l1_index, l2_index, ret;
+ uint64_t l2_offset, *l2_table, cluster_offset, tmp;
l1_index = offset >> (s->l2_bits + s->cluster_bits);
if (l1_index >= s->l1_size) {
/* outside l1 table is allowed: we grow the table if needed */
if (!allocate)
return 0;
- if (grow_l1_table(bs, l1_index + 1) < 0)
+ ret = grow_l1_table(bs, l1_index + 1);
+ if (ret < 0)
return 0;
}
l2_offset = s->l1_table[l1_index];
if (!l2_offset) {
if (!allocate)
return 0;
- l2_allocate:
- old_l2_offset = l2_offset;
- /* allocate a new l2 entry */
- l2_offset = alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
- /* update the L1 entry */
- s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
- tmp = cpu_to_be64(l2_offset | QCOW_OFLAG_COPIED);
- if (bdrv_pwrite(s->hd, s->l1_table_offset + l1_index * sizeof(tmp),
- &tmp, sizeof(tmp)) != sizeof(tmp))
- return 0;
- min_index = l2_cache_new_entry(bs);
- l2_table = s->l2_cache + (min_index << s->l2_bits);
-
- if (old_l2_offset == 0) {
- memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
- } else {
- if (bdrv_pread(s->hd, old_l2_offset,
- l2_table, s->l2_size * sizeof(uint64_t)) !=
- s->l2_size * sizeof(uint64_t))
- return 0;
- }
- if (bdrv_pwrite(s->hd, l2_offset,
- l2_table, s->l2_size * sizeof(uint64_t)) !=
- s->l2_size * sizeof(uint64_t))
+ ret = l2_allocate(bs, l1_index, &l2_table, &l2_offset);
+ if (ret == 0)
return 0;
} else {
- if (!(l2_offset & QCOW_OFLAG_COPIED)) {
- if (allocate) {
- free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
- goto l2_allocate;
- }
+ if (!(l2_offset & QCOW_OFLAG_COPIED) && allocate) {
+ free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
+ ret = l2_allocate(bs, l1_index, &l2_table, &l2_offset);
+ if (ret == 0)
+ return 0;
} else {
- l2_offset &= ~QCOW_OFLAG_COPIED;
- }
- for(i = 0; i < L2_CACHE_SIZE; i++) {
- if (l2_offset == s->l2_cache_offsets[i]) {
- /* increment the hit count */
- if (++s->l2_cache_counts[i] == 0xffffffff) {
- for(j = 0; j < L2_CACHE_SIZE; j++) {
- s->l2_cache_counts[j] >>= 1;
- }
- }
- l2_table = s->l2_cache + (i << s->l2_bits);
- goto found;
- }
+ ret = l2_load(bs, l1_index, &l2_table, &l2_offset);
+ if (ret == 0)
+ return 0;
}
- /* not found: load a new entry in the least used one */
- min_index = l2_cache_new_entry(bs);
- l2_table = s->l2_cache + (min_index << s->l2_bits);
-        if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=
- s->l2_size * sizeof(uint64_t))
- return 0;
}
- s->l2_cache_offsets[min_index] = l2_offset;
- s->l2_cache_counts[min_index] = 1;
- found:
l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
cluster_offset = be64_to_cpu(l2_table[l2_index]);
if (!cluster_offset) {
--
- [Qemu-devel] [patch 0/5][v2] qcow2: improve I/O performance with cache=off, Laurent Vivier, 2008/07/29
- [Qemu-devel] [patch 1/5][v2] Extract code from get_cluster_offset(), Laurent Vivier <=
- [Qemu-devel] [patch 4/5][v2] Aggregate same type clusters., Laurent Vivier, 2008/07/29
- [Qemu-devel] [patch 3/5][v2] Extract compressing part from alloc_cluster_offset(), Laurent Vivier, 2008/07/29
- [Qemu-devel] [patch 2/5][v2] Divide get_cluster_offset(), Laurent Vivier, 2008/07/29
- [Qemu-devel] [patch 5/5][v2] Try to aggregate free clusters and freed clusters, Laurent Vivier, 2008/07/29
- Re: [Qemu-devel] [patch 0/5][v2] qcow2: improve I/O performance with cache=off, Anthony Liguori, 2008/07/29