From: Elena Ufimtseva
Subject: [PATCH RFC 13/19] vfio-user: respond to remote DMA read/write requests
Date: Sun, 18 Jul 2021 23:27:52 -0700

From: John G Johnson <john.g.johnson@oracle.com>

Handle VFIO_USER_DMA_READ and VFIO_USER_DMA_WRITE requests from the
remote process: perform the access on the guest's behalf with
pci_dma_read()/pci_dma_write(), and send the data (for reads) or just
the reply header (for writes) back over the socket.

Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
Signed-off-by: John G Johnson <john.g.johnson@oracle.com>
Signed-off-by: Jagannathan Raman <jag.raman@oracle.com>
---
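Note on the message layout: struct vfio_user_dma_rw carries the DMA
payload inline after the fixed fields, so a DMA write request from the
remote process is the header plus "count" bytes of data, and a DMA read
reply mirrors that layout.  As a rough, illustrative sketch (header
bookkeeping such as the message ID is omitted; g_malloc0() leaves
hdr.flags zero, so a reply is expected), a 4-byte write to guest DMA
address 0x1000 built by the remote side would look like:

    uint32_t val = 0xdeadbeef;
    struct vfio_user_dma_rw *msg = g_malloc0(sizeof(*msg) + sizeof(val));

    msg->hdr.command = VFIO_USER_DMA_WRITE;
    msg->hdr.size = sizeof(*msg) + sizeof(val);   /* fixed part + data */
    msg->offset = 0x1000;                         /* guest DMA address */
    msg->count = sizeof(val);                     /* bytes in data[] */
    memcpy(msg->data, &val, sizeof(val));

This is also why vfio_user_recv() now accepts incoming messages up to
max_xfer_size plus sizeof(struct vfio_user_dma_rw) instead of
max_xfer_size alone.
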
 hw/vfio/user.h | 16 ++++++++++++
 hw/vfio/pci.c  | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++
 hw/vfio/user.c | 21 +++++++++++++++-
 3 files changed, 103 insertions(+), 1 deletion(-)

diff --git a/hw/vfio/user.h b/hw/vfio/user.h
index 351fdb3ee1..d08d94ed92 100644
--- a/hw/vfio/user.h
+++ b/hw/vfio/user.h
@@ -206,6 +206,17 @@ struct vfio_user_region_info {
     uint64_t offset;
 };
 
+/*
+ * VFIO_USER_DMA_READ
+ * VFIO_USER_DMA_WRITE
+ */
+struct vfio_user_dma_rw {
+    vfio_user_hdr_t hdr;
+    uint64_t offset;
+    uint32_t count;
+    char data[];
+};
+
 void vfio_user_recv(void *opaque);
 void vfio_user_send_reply(VFIOProxy *proxy, char *buf, int ret);
 VFIOProxy *vfio_user_connect_dev(char *sockname, Error **errp);
@@ -224,4 +235,9 @@ int vfio_user_dma_unmap(VFIOProxy *proxy,
                         struct vfio_bitmap *bitmap);
 int vfio_user_get_region_info(VFIODevice *vbasedev, int index,
                               struct vfio_region_info *info, VFIOUserFDs *fds);
+uint64_t vfio_user_max_xfer(void);
+void vfio_user_set_reqhandler(VFIODevice *vbasedev,
+                              int (*handler)(void *opaque, char *buf,
+                                             VFIOUserFDs *fds),
+                              void *reqarg);
 #endif /* VFIO_USER_H */
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index a8d2e59470..7042c178dd 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -3347,6 +3347,72 @@ static void register_vfio_pci_dev_type(void)
 
 type_init(register_vfio_pci_dev_type)
 
+static int vfio_user_dma_read(VFIOPCIDevice *vdev, struct vfio_user_dma_rw *msg)
+{
+    PCIDevice *pdev = &vdev->pdev;
+    char *buf;
+    int size = msg->count + sizeof(struct vfio_user_dma_rw);
+
+    if (msg->hdr.flags & VFIO_USER_NO_REPLY) {
+        return -EINVAL;
+    }
+    if (msg->count > vfio_user_max_xfer()) {
+        return -E2BIG;
+    }
+
+    buf = g_malloc0(size);
+    memcpy(buf, msg, sizeof(*msg));
+
+    pci_dma_read(pdev, msg->offset, buf + sizeof(*msg), msg->count);
+
+    vfio_user_send_reply(vdev->vbasedev.proxy, buf, size);
+    g_free(buf);
+    return 0;
+}
+
+static int vfio_user_dma_write(VFIOPCIDevice *vdev,
+                               struct vfio_user_dma_rw *msg)
+{
+    PCIDevice *pdev = &vdev->pdev;
+    char *buf = (char *)msg + sizeof(*msg);
+
+    /* make sure transfer count isn't larger than the message data */
+    if (msg->count > msg->hdr.size - sizeof(*msg)) {
+        return -E2BIG;
+    }
+
+    pci_dma_write(pdev, msg->offset, buf, msg->count);
+
+    if ((msg->hdr.flags & VFIO_USER_NO_REPLY) == 0) {
+        vfio_user_send_reply(vdev->vbasedev.proxy, (char *)msg,
+                             sizeof(msg->hdr));
+    }
+    return 0;
+}
+
+static int vfio_user_pci_process_req(void *opaque, char *buf, VFIOUserFDs *fds)
+{
+    VFIOPCIDevice *vdev = opaque;
+    vfio_user_hdr_t *hdr = (vfio_user_hdr_t *)buf;
+    int ret;
+
+    if (fds->recv_fds != 0) {
+        return -EINVAL;
+    }
+    switch (hdr->command) {
+    case VFIO_USER_DMA_READ:
+        ret = vfio_user_dma_read(vdev, (struct vfio_user_dma_rw *)hdr);
+        break;
+    case VFIO_USER_DMA_WRITE:
+        ret = vfio_user_dma_write(vdev, (struct vfio_user_dma_rw *)hdr);
+        break;
+    default:
+        error_printf("vfio_user_pci_process_req unknown cmd %d\n", hdr->command);
+        ret = -ENOSYS;
+    }
+    return ret;
+}
+
 /*
  * Emulated devices don't use host hot reset
  */
@@ -3392,6 +3458,7 @@ static void vfio_user_pci_realize(PCIDevice *pdev, Error **errp)
         return;
     }
     vbasedev->proxy = proxy;
+    vfio_user_set_reqhandler(vbasedev, vfio_user_pci_process_req, vdev);
 
     if (udev->secure) {
         proxy->flags |= VFIO_PROXY_SECURE;
diff --git a/hw/vfio/user.c b/hw/vfio/user.c
index eea8b9b402..8bedbc19f3 100644
--- a/hw/vfio/user.c
+++ b/hw/vfio/user.c
@@ -42,6 +42,11 @@ static void vfio_user_request_msg(vfio_user_hdr_t *hdr, uint16_t cmd,
 static void vfio_user_send_recv(VFIOProxy *proxy, vfio_user_hdr_t *msg,
                                 VFIOUserFDs *fds, int rsize);
 
+uint64_t vfio_user_max_xfer(void)
+{
+    return max_xfer_size;
+}
+
 static void vfio_user_shutdown(VFIOProxy *proxy)
 {
     qio_channel_shutdown(proxy->ioc, QIO_CHANNEL_SHUTDOWN_READ, NULL);
@@ -236,7 +241,7 @@ void vfio_user_recv(void *opaque)
         *reply->msg = msg;
         data = (char *)reply->msg + sizeof(msg);
     } else {
-        if (msg.size > max_xfer_size) {
+        if (msg.size > max_xfer_size + sizeof(struct vfio_user_dma_rw)) {
             error_setg(&local_err, "vfio_user_recv request larger than max");
             goto fatal;
         }
@@ -779,3 +784,17 @@ int vfio_user_get_region_info(VFIODevice *vbasedev, int index,
     memcpy(info, &msgp->argsz, info->argsz);
     return 0;
 }
+
+void vfio_user_set_reqhandler(VFIODevice *vbasedev,
+                              int (*handler)(void *opaque, char *buf,
+                                             VFIOUserFDs *fds),
+                              void *reqarg)
+{
+    VFIOProxy *proxy = vbasedev->proxy;
+
+    proxy->request = handler;
+    proxy->reqarg = reqarg;
+    qio_channel_set_aio_fd_handler(proxy->ioc,
+                                   iothread_get_aio_context(vfio_user_iothread),
+                                   vfio_user_recv, NULL, vbasedev);
+}
-- 
2.25.1



