From: Avi Kivity
Subject: [Qemu-devel] [PATCH 3/4] Introduce block dma helpers
Date: Thu, 5 Feb 2009 11:33:58 +0200

These helpers perform read/write requests over entire scatter/gather lists,
relieving device emulation code of the need to map and unmap physical memory
itself, and of retrying when map resources are temporarily exhausted.

Signed-off-by: Avi Kivity <address@hidden>
---
 dma-helpers.c |  119 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 dma.h         |    8 ++++
 2 files changed, 126 insertions(+), 1 deletions(-)
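
As an illustrative usage sketch (not taken from this patch): a device model
that has built a QEMUSGList from guest descriptors could issue a read through
the new helper roughly as below. MyDevState, my_dma_complete, my_dev_start_read
and the struct fields are hypothetical names, shown only to indicate the
intended calling convention; the completion callback runs once, after the
helper has finished (or failed) the whole list and unmapped every guest page
it had mapped.

    #include "dma.h"

    /* Hypothetical device state; only the fields used below are shown. */
    typedef struct MyDevState {
        BlockDriverState *bs;     /* backing block device */
        QEMUSGList sg;            /* built from guest descriptors */
        BlockDriverAIOCB *acb;    /* in-flight request, if any */
        int busy;
        int error;
    } MyDevState;

    /* Invoked once per request, after all mappings have been released. */
    static void my_dma_complete(void *opaque, int ret)
    {
        MyDevState *s = opaque;

        qemu_sglist_destroy(&s->sg);
        s->busy = 0;
        s->error = (ret < 0);
        /* ... raise the device's completion interrupt here ... */
    }

    static void my_dev_start_read(MyDevState *s, uint64_t sector, int nb_desc)
    {
        qemu_sglist_init(&s->sg, nb_desc);
        /* ... one qemu_sglist_add(&s->sg, base, len) per guest descriptor ... */
        s->busy = 1;
        s->acb = dma_bdrv_read(s->bs, &s->sg, sector, my_dma_complete, s);
    }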

diff --git a/dma-helpers.c b/dma-helpers.c
index 11ad3a4..b2ade19 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -8,7 +8,7 @@
  */
 
 #include "dma.h"
-
+#include "block_int.h"
 
 void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
 {
@@ -36,3 +36,120 @@ void qemu_sglist_destroy(QEMUSGList *qsg)
     qemu_free(qsg->sg);
 }
 
+typedef struct {
+    BlockDriverState *bs;
+    BlockDriverAIOCB *acb;
+    QEMUSGList *sg;
+    uint64_t sector_num;
+    int is_write;
+    int sg_cur_index;
+    target_phys_addr_t sg_cur_byte;
+    QEMUIOVector iov;
+    QEMUBH *bh;
+} DMABlockState;
+
+static void dma_bdrv_cb(void *opaque, int ret);
+
+static void reschedule_dma(void *opaque)
+{
+    DMABlockState *dbs = (DMABlockState *)opaque;
+
+    qemu_bh_delete(dbs->bh);
+    dbs->bh = NULL;
+    dma_bdrv_cb(opaque, 0);
+}
+
+static void continue_after_map_failure(void *opaque)
+{
+    DMABlockState *dbs = (DMABlockState *)opaque;
+
+    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
+    qemu_bh_schedule(dbs->bh);
+}
+
+static void dma_bdrv_cb(void *opaque, int ret)
+{
+    DMABlockState *dbs = (DMABlockState *)opaque;
+    target_phys_addr_t cur_addr, cur_len;
+    void *mem;
+    int i;
+
+    dbs->sector_num += dbs->iov.size / 512;
+    for (i = 0; i < dbs->iov.niov; ++i) {
+        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
+                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
+                                  dbs->iov.iov[i].iov_len);
+    }
+    qemu_iovec_reset(&dbs->iov);
+
+    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
+        dbs->acb->cb(dbs->acb->opaque, ret);
+        qemu_iovec_destroy(&dbs->iov);
+        qemu_aio_release(dbs->acb);
+        qemu_free(dbs);
+        return;
+    }
+
+    while (dbs->sg_cur_index < dbs->sg->nsg) {
+        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
+        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
+        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
+        if (!mem)
+            break;
+        qemu_iovec_add(&dbs->iov, mem, cur_len);
+        dbs->sg_cur_byte += cur_len;
+        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
+            dbs->sg_cur_byte = 0;
+            ++dbs->sg_cur_index;
+        }
+    }
+
+    if (dbs->iov.size == 0) {
+        cpu_register_map_client(dbs, continue_after_map_failure);
+        return;
+    }
+
+    if (dbs->is_write) {
+        bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
+                        dbs->iov.size / 512, dma_bdrv_cb, dbs);
+    } else {
+        bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
+                       dbs->iov.size / 512, dma_bdrv_cb, dbs);
+    }
+}
+
+static BlockDriverAIOCB *dma_bdrv_io(
+    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
+    BlockDriverCompletionFunc *cb, void *opaque,
+    int is_write)
+{
+    DMABlockState *dbs = qemu_malloc(sizeof(*dbs));
+
+    dbs->bs = bs;
+    dbs->acb = qemu_aio_get(bs, cb, opaque);
+    dbs->sg = sg;
+    dbs->sector_num = sector_num;
+    dbs->sg_cur_index = 0;
+    dbs->sg_cur_byte = 0;
+    dbs->is_write = is_write;
+    dbs->bh = NULL;
+    qemu_iovec_init(&dbs->iov, sg->nsg);
+    dma_bdrv_cb(dbs, 0);
+    return dbs->acb;
+}
+
+
+BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
+                                QEMUSGList *sg, uint64_t sector,
+                                void (*cb)(void *opaque, int ret), void *opaque)
+{
+    return dma_bdrv_io(bs, sg, sector, cb, opaque, 0);
+}
+
+BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
+                                 QEMUSGList *sg, uint64_t sector,
+                                 void (*cb)(void *opaque, int ret), void *opaque)
+{
+    return dma_bdrv_io(bs, sg, sector, cb, opaque, 1);
+}
+
diff --git a/dma.h b/dma.h
index 16af50f..d596717 100644
--- a/dma.h
+++ b/dma.h
@@ -12,6 +12,7 @@
 
 #include <stdio.h>
 #include "cpu.h"
+#include "block.h"
 
 typedef struct {
     target_phys_addr_t base;
@@ -30,4 +31,11 @@ void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                      target_phys_addr_t len);
 void qemu_sglist_destroy(QEMUSGList *qsg);
 
+BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
+                                QEMUSGList *sg, uint64_t sector,
+                                BlockDriverCompletionFunc *cb, void *opaque);
+BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
+                                 QEMUSGList *sg, uint64_t sector,
+                                 BlockDriverCompletionFunc *cb, void *opaque);
+
 #endif
-- 
1.6.1.1