[PATCH 7/9] util/vfio-helpers: Have qemu_vfio_dma_map() propagate Error


From: Philippe Mathieu-Daudé
Subject: [PATCH 7/9] util/vfio-helpers: Have qemu_vfio_dma_map() propagate Error
Date: Tue, 24 Aug 2021 16:11:40 +0200

Now that all qemu_vfio_dma_map() callers provide an Error* argument,
fill it with a relevant, more descriptive message. Reduce the scope of
'ret' (the returned value) by returning the errno directly, which
simplifies the code (removing the goto / 'out' label).

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
 block/nvme.c        |  1 +
 util/vfio-helpers.c | 31 ++++++++++++++-----------------
 2 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index 663e5d918fa..80546b0babd 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -240,6 +240,7 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
     r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
                           false, &prp_list_iova, errp);
     if (r) {
+        error_prepend(errp, "Cannot map buffer for DMA: ");
         goto fail;
     }
     q->free_req_head = -1;
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index 3e1a49bea15..f4c16e1dae5 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -729,7 +729,6 @@ qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
 int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
                       bool temporary, uint64_t *iova, Error **errp)
 {
-    int ret = 0;
     int index;
     IOVAMapping *mapping;
     uint64_t iova0;
@@ -742,32 +741,34 @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
     if (mapping) {
         iova0 = mapping->iova + ((uint8_t *)host - (uint8_t *)mapping->host);
     } else {
+        int ret;
+
         if (s->high_water_mark - s->low_water_mark + 1 < size) {
-            ret = -ENOMEM;
-            goto out;
+            error_setg(errp, "iova exhausted (water mark reached)");
+            return -ENOMEM;
         }
         if (!temporary) {
-            if (qemu_vfio_find_fixed_iova(s, size, &iova0)) {
-                ret = -ENOMEM;
-                goto out;
+            if (qemu_vfio_find_fixed_iova(s, size, &iova0) < 0) {
+                error_setg(errp, "iova range not found");
+                return -ENOMEM;
             }
 
             mapping = qemu_vfio_add_mapping(s, host, size, index + 1, iova0);
             assert(qemu_vfio_verify_mappings(s));
             ret = qemu_vfio_do_mapping(s, host, size, iova0);
-            if (ret) {
+            if (ret < 0) {
                 qemu_vfio_undo_mapping(s, mapping, NULL);
-                goto out;
+                return ret;
             }
             qemu_vfio_dump_mappings(s);
         } else {
             if (qemu_vfio_find_temp_iova(s, size, &iova0)) {
-                ret = -ENOMEM;
-                goto out;
+                error_setg(errp, "iova range not found");
+                return -ENOMEM;
             }
             ret = qemu_vfio_do_mapping(s, host, size, iova0);
-            if (ret) {
-                goto out;
+            if (ret < 0) {
+                return ret;
             }
         }
     }
@@ -775,11 +776,7 @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
     if (iova) {
         *iova = iova0;
     }
-out:
-    if (ret) {
-        error_setg_errno(errp, -ret, "Cannot map buffer for DMA");
-    }
-    return ret;
+    return 0;
 }
 
 /* Reset the high watermark and free all "temporary" mappings. */
-- 
2.31.1
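
[Editor's note: for reference, a minimal caller-side sketch of the error
contract this patch settles on: qemu_vfio_dma_map() either succeeds and
returns 0, or sets *errp and returns a negative errno, so callers only add
their own context. The wrapper name example_map_buffer and its parameters
are hypothetical and not part of the patch; the pattern mirrors the
block/nvme.c hunk above.]

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/vfio-helpers.h"

/*
 * Hypothetical caller (illustration only): map 'buf' of 'len' bytes for
 * DMA and report failures through 'errp', as block/nvme.c now does.
 */
static int example_map_buffer(QEMUVFIOState *vfio, void *buf, size_t len,
                              uint64_t *iova, Error **errp)
{
    /* On failure the helper has already set *errp and returned -errno. */
    int ret = qemu_vfio_dma_map(vfio, buf, len, false, iova, errp);
    if (ret < 0) {
        /* Prepend caller-specific context on top of the helper's message. */
        error_prepend(errp, "Cannot map buffer for DMA: ");
    }
    return ret;
}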