From: Eric Blake
Subject: [Qemu-block] [PATCH v5 06/20] iscsi: Switch iscsi_allocmap_update() to byte-based
Date: Thu, 30 Nov 2017 19:42:28 -0600

We are gradually converting to byte-based interfaces, as they are
easier to reason about than sector-based.  Convert all uses of
the allocmap (no semantic change).  Callers that already had bytes
available are simpler, and callers that now scale to bytes will be
easier to switch to byte-based in the future.

Signed-off-by: Eric Blake <address@hidden>
Acked-by: Paolo Bonzini <address@hidden>

---
v3-v5: no change
v2: rebase to count/bytes rename
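
Not part of the patch: a small standalone sketch of how the byte-based
rounding in iscsi_allocmap_update() maps a [offset, offset + bytes) byte
range onto cluster indices.  The local DIV_ROUND_UP macro and the main()
harness are illustrative assumptions; the expand/shrink arithmetic mirrors
the first hunk in the diff below.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for QEMU's DIV_ROUND_UP (illustrative assumption). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    int64_t cluster_size = 64 * 1024;   /* 64 KiB clusters */
    int64_t offset = 100 * 1024;        /* request start, in bytes */
    int64_t bytes = 200 * 1024;         /* request length, in bytes */

    /* expand to entirely contain all affected clusters */
    int64_t cl_num_expanded = offset / cluster_size;
    int64_t nb_cls_expanded =
        DIV_ROUND_UP(offset + bytes, cluster_size) - cl_num_expanded;

    /* shrink to touch only completely contained clusters */
    int64_t cl_num_shrunk = DIV_ROUND_UP(offset, cluster_size);
    int64_t nb_cls_shrunk =
        (offset + bytes) / cluster_size - cl_num_shrunk;

    /* for this example: expanded [1, +4), shrunk [2, +2) */
    printf("expanded: start %" PRId64 ", count %" PRId64 "\n",
           cl_num_expanded, nb_cls_expanded);
    printf("shrunk:   start %" PRId64 ", count %" PRId64 "\n",
           cl_num_shrunk, nb_cls_shrunk);
    return 0;
}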
---
 block/iscsi.c | 90 +++++++++++++++++++++++++++++------------------------------
 1 file changed, 44 insertions(+), 46 deletions(-)

diff --git a/block/iscsi.c b/block/iscsi.c
index 8f903d8370..4896d50d6e 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -455,24 +455,22 @@ static int iscsi_allocmap_init(IscsiLun *iscsilun, int open_flags)
 }

 static void
-iscsi_allocmap_update(IscsiLun *iscsilun, int64_t sector_num,
-                      int nb_sectors, bool allocated, bool valid)
+iscsi_allocmap_update(IscsiLun *iscsilun, int64_t offset,
+                      int64_t bytes, bool allocated, bool valid)
 {
     int64_t cl_num_expanded, nb_cls_expanded, cl_num_shrunk, nb_cls_shrunk;
-    int cluster_sectors = iscsilun->cluster_size >> BDRV_SECTOR_BITS;

     if (iscsilun->allocmap == NULL) {
         return;
     }
     /* expand to entirely contain all affected clusters */
-    assert(cluster_sectors);
-    cl_num_expanded = sector_num / cluster_sectors;
-    nb_cls_expanded = DIV_ROUND_UP(sector_num + nb_sectors,
-                                   cluster_sectors) - cl_num_expanded;
+    assert(iscsilun->cluster_size);
+    cl_num_expanded = offset / iscsilun->cluster_size;
+    nb_cls_expanded = DIV_ROUND_UP(offset + bytes, iscsilun->cluster_size)
+        - cl_num_expanded;
     /* shrink to touch only completely contained clusters */
-    cl_num_shrunk = DIV_ROUND_UP(sector_num, cluster_sectors);
-    nb_cls_shrunk = (sector_num + nb_sectors) / cluster_sectors
-                      - cl_num_shrunk;
+    cl_num_shrunk = DIV_ROUND_UP(offset, iscsilun->cluster_size);
+    nb_cls_shrunk = (offset + bytes) / iscsilun->cluster_size - cl_num_shrunk;
     if (allocated) {
         bitmap_set(iscsilun->allocmap, cl_num_expanded, nb_cls_expanded);
     } else {
@@ -495,26 +493,26 @@ iscsi_allocmap_update(IscsiLun *iscsilun, int64_t sector_num,
 }

 static void
-iscsi_allocmap_set_allocated(IscsiLun *iscsilun, int64_t sector_num,
-                             int nb_sectors)
+iscsi_allocmap_set_allocated(IscsiLun *iscsilun, int64_t offset,
+                             int64_t bytes)
 {
-    iscsi_allocmap_update(iscsilun, sector_num, nb_sectors, true, true);
+    iscsi_allocmap_update(iscsilun, offset, bytes, true, true);
 }

 static void
-iscsi_allocmap_set_unallocated(IscsiLun *iscsilun, int64_t sector_num,
-                               int nb_sectors)
+iscsi_allocmap_set_unallocated(IscsiLun *iscsilun, int64_t offset,
+                               int64_t bytes)
 {
     /* Note: if cache.direct=on the fifth argument to iscsi_allocmap_update
      * is ignored, so this will in effect be an iscsi_allocmap_set_invalid.
      */
-    iscsi_allocmap_update(iscsilun, sector_num, nb_sectors, false, true);
+    iscsi_allocmap_update(iscsilun, offset, bytes, false, true);
 }

-static void iscsi_allocmap_set_invalid(IscsiLun *iscsilun, int64_t sector_num,
-                                       int nb_sectors)
+static void iscsi_allocmap_set_invalid(IscsiLun *iscsilun, int64_t offset,
+                                       int64_t bytes)
 {
-    iscsi_allocmap_update(iscsilun, sector_num, nb_sectors, false, false);
+    iscsi_allocmap_update(iscsilun, offset, bytes, false, false);
 }

 static void iscsi_allocmap_invalidate(IscsiLun *iscsilun)
@@ -528,34 +526,30 @@ static void iscsi_allocmap_invalidate(IscsiLun *iscsilun)
 }

 static inline bool
-iscsi_allocmap_is_allocated(IscsiLun *iscsilun, int64_t sector_num,
-                            int nb_sectors)
+iscsi_allocmap_is_allocated(IscsiLun *iscsilun, int64_t offset,
+                            int64_t bytes)
 {
     unsigned long size;
     if (iscsilun->allocmap == NULL) {
         return true;
     }
     assert(iscsilun->cluster_size);
-    size = DIV_ROUND_UP(sector_num + nb_sectors,
-                        iscsilun->cluster_size >> BDRV_SECTOR_BITS);
+    size = DIV_ROUND_UP(offset + bytes, iscsilun->cluster_size);
     return !(find_next_bit(iscsilun->allocmap, size,
-                           sector_num * BDRV_SECTOR_SIZE /
-                           iscsilun->cluster_size) == size);
+                           offset / iscsilun->cluster_size) == size);
 }

 static inline bool iscsi_allocmap_is_valid(IscsiLun *iscsilun,
-                                           int64_t sector_num, int nb_sectors)
+                                           int64_t offset, int64_t bytes)
 {
     unsigned long size;
     if (iscsilun->allocmap_valid == NULL) {
         return false;
     }
     assert(iscsilun->cluster_size);
-    size = DIV_ROUND_UP(sector_num + nb_sectors,
-                        iscsilun->cluster_size >> BDRV_SECTOR_BITS);
+    size = DIV_ROUND_UP(offset + bytes, iscsilun->cluster_size);
     return (find_next_zero_bit(iscsilun->allocmap_valid, size,
-                               sector_num * BDRV_SECTOR_SIZE /
-                               iscsilun->cluster_size) == size);
+                               offset / iscsilun->cluster_size) == size);
 }

 static int coroutine_fn
@@ -637,12 +631,14 @@ retry:
     }

     if (iTask.status != SCSI_STATUS_GOOD) {
-        iscsi_allocmap_set_invalid(iscsilun, sector_num, nb_sectors);
+        iscsi_allocmap_set_invalid(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                   nb_sectors * BDRV_SECTOR_SIZE);
         r = iTask.err_code;
         goto out_unlock;
     }

-    iscsi_allocmap_set_allocated(iscsilun, sector_num, nb_sectors);
+    iscsi_allocmap_set_allocated(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                 nb_sectors * BDRV_SECTOR_SIZE);

 out_unlock:
     qemu_mutex_unlock(&iscsilun->mutex);
@@ -737,9 +733,11 @@ retry:
     }

     if (ret & BDRV_BLOCK_ZERO) {
-        iscsi_allocmap_set_unallocated(iscsilun, sector_num, *pnum);
+        iscsi_allocmap_set_unallocated(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                       *pnum * BDRV_SECTOR_SIZE);
     } else {
-        iscsi_allocmap_set_allocated(iscsilun, sector_num, *pnum);
+        iscsi_allocmap_set_allocated(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                     *pnum * BDRV_SECTOR_SIZE);
     }

     if (*pnum > nb_sectors) {
@@ -777,15 +775,19 @@ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
     /* if cache.direct is off and we have a valid entry in our allocation map
      * we can skip checking the block status and directly return zeroes if
      * the request falls within an unallocated area */
-    if (iscsi_allocmap_is_valid(iscsilun, sector_num, nb_sectors) &&
-        !iscsi_allocmap_is_allocated(iscsilun, sector_num, nb_sectors)) {
+    if (iscsi_allocmap_is_valid(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                nb_sectors * BDRV_SECTOR_SIZE) &&
+        !iscsi_allocmap_is_allocated(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                     nb_sectors * BDRV_SECTOR_SIZE)) {
             qemu_iovec_memset(iov, 0, 0x00, iov->size);
             return 0;
     }

     if (nb_sectors >= ISCSI_CHECKALLOC_THRES &&
-        !iscsi_allocmap_is_valid(iscsilun, sector_num, nb_sectors) &&
-        !iscsi_allocmap_is_allocated(iscsilun, sector_num, nb_sectors)) {
+        !iscsi_allocmap_is_valid(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                 nb_sectors * BDRV_SECTOR_SIZE) &&
+        !iscsi_allocmap_is_allocated(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                     nb_sectors * BDRV_SECTOR_SIZE)) {
         int pnum;
         BlockDriverState *file;
         /* check the block status from the beginning of the cluster
@@ -1154,8 +1156,7 @@ retry:
         goto out_unlock;
     }

-    iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
-                               bytes >> BDRV_SECTOR_BITS);
+    iscsi_allocmap_set_invalid(iscsilun, offset, bytes);

 out_unlock:
     qemu_mutex_unlock(&iscsilun->mutex);
@@ -1253,18 +1254,15 @@ retry:
     }

     if (iTask.status != SCSI_STATUS_GOOD) {
-        iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
-                                   bytes >> BDRV_SECTOR_BITS);
+        iscsi_allocmap_set_invalid(iscsilun, offset, bytes);
         r = iTask.err_code;
         goto out_unlock;
     }

     if (flags & BDRV_REQ_MAY_UNMAP) {
-        iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
-                                   bytes >> BDRV_SECTOR_BITS);
+        iscsi_allocmap_set_invalid(iscsilun, offset, bytes);
     } else {
-        iscsi_allocmap_set_allocated(iscsilun, offset >> BDRV_SECTOR_BITS,
-                                     bytes >> BDRV_SECTOR_BITS);
+        iscsi_allocmap_set_allocated(iscsilun, offset, bytes);
     }

 out_unlock:
-- 
2.14.3



