
Re: [PATCH v8 9/9] virtio-pci: add support for configure interrupt


From: Jason Wang
Subject: Re: [PATCH v8 9/9] virtio-pci: add support for configure interrupt
Date: Fri, 9 Jul 2021 14:03:10 +0800
User-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Thunderbird/78.11.0


On 2021/7/6 3:20 PM, Cindy Lu wrote:
Add support for the configure interrupt: use kvm_irqfd_assign to set the
gsi to the kernel. When the configure notifier is signaled (eventfd_signal)
by the host kernel, this finally injects an MSI-X interrupt into the guest.

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
  hw/virtio/virtio-pci.c | 60 +++++++++++++++++++++++++++++++++++-------
  1 file changed, 50 insertions(+), 10 deletions(-)
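
For context, the gsi/irqfd wiring described in the commit message amounts to
attaching the config notifier's eventfd to the KVM route of the MSI-X vector;
kvm_irqchip_add_irqfd_notifier_gsi() is QEMU's wrapper around the KVM_IRQFD
ioctl (kvm_irqfd_assign on the kernel side). A rough sketch, with a
hypothetical helper name:

/* Sketch only: attach the config notifier's eventfd to the KVM gsi of the
 * MSI-X vector, so a host-side eventfd_signal() is delivered to the guest
 * as an MSI-X interrupt without bouncing through QEMU. */
static int config_irqfd_use_sketch(VirtIOPCIProxy *proxy, EventNotifier *n,
                                   unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}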

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index e43d5760ee..73b5ffd1b8 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -717,7 +717,8 @@ static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
      VirtQueue *vq;
if (queue_no == VIRTIO_CONFIG_IRQ_IDX) {
-        return -1;
+        *n = virtio_get_config_notifier(vdev);
+        *vector = vdev->config_vector;


Is there a case where the vector is not vdev->config_vector? If not, we probably don't need this.
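
If the answer is no, one hypothetical way to drop the special case is a small
lookup helper, so this branch would not need to fill in *vector at all (a
sketch, not part of the patch):

/* Hypothetical sketch: fold the config case into the generic vector lookup
 * so virtio_pci_get_notifier() does not have to set *vector here. */
static uint16_t virtio_get_vector(VirtIODevice *vdev, int queue_no)
{
    return queue_no == VIRTIO_CONFIG_IRQ_IDX ? vdev->config_vector
                                             : virtio_queue_vector(vdev, queue_no);
}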


      } else {
          if (!virtio_queue_get_num(vdev, queue_no)) {
              return -1;
@@ -764,6 +765,10 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
      return ret;
  }
+static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
+{
+    return kvm_virtio_pci_vector_use_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
+}
static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
                          int queue_no)
@@ -792,6 +797,30 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
      }
  }
+static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)


"kvm_virtio_pci_config_vector_relase" please, consider we've already had:

kvm_virtio_pci_vq_vector_release().

Thanks
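
For clarity, with that rename the two wrappers would read roughly as follows
(a sketch based on the hunks above; renaming the matching _use wrapper for
symmetry is an extrapolation):

/* Suggested naming, mirroring kvm_virtio_pci_vq_vector_release(): */
static int kvm_virtio_pci_config_vector_use(VirtIOPCIProxy *proxy)
{
    return kvm_virtio_pci_vector_use_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
}

static void kvm_virtio_pci_config_vector_release(VirtIOPCIProxy *proxy)
{
    kvm_virtio_pci_vector_release_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
}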


+{
+    kvm_virtio_pci_vector_release_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
+}



A blank line is needed here.


+static int virtio_pci_set_config_notifier(DeviceState *d, bool assign)
+{
+    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    EventNotifier *notifier = virtio_get_config_notifier(vdev);
+    int r = 0;
+    if (assign) {
+        r = event_notifier_init(notifier, 0);
+        if (r < 0) {
+            return r;
+        }
+        virtio_set_config_notifier_fd_handler(vdev, true, true);
+        kvm_virtio_pci_vector_config_use(proxy);
+    } else {
+        virtio_set_config_notifier_fd_handler(vdev, false, true);
+        kvm_virtio_pci_vector_config_release(proxy);
+        event_notifier_cleanup(notifier);
+    }


We check MSI-X in virtio_pci_set_guest_notifier() but not here; any reason for this?

And I think we should consider reusing the code in virtio_pci_set_guest_notifier().
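
For reference, a minimal sketch of that MSI-X check, modeled on the with_irqfd
test in virtio_pci_set_guest_notifiers() (hypothetical, not part of the patch):

/* Hypothetical: only wire up the irqfd route when MSI-X is actually usable,
 * the same way virtio_pci_set_guest_notifiers() gates on it. */
bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
                  kvm_msi_via_irqfd_enabled();

virtio_set_config_notifier_fd_handler(vdev, true, with_irqfd);
if (with_irqfd) {
    kvm_virtio_pci_vector_config_use(proxy);
}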


+    return r;
+}
  static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy,
                                         unsigned int queue_no,
                                         unsigned int vector,
@@ -873,7 +902,12 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
          }
          vq = virtio_vector_next_queue(vq);
      }
-
+    n = virtio_get_config_notifier(vdev);
+    ret = virtio_pci_one_vector_unmask(proxy, VIRTIO_CONFIG_IRQ_IDX,
+                        vector, msg, n);
+    if (ret < 0) {
+        goto undo;
+    }
      return 0;
undo:
@@ -909,6 +943,8 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
          }
          vq = virtio_vector_next_queue(vq);
      }
+    n = virtio_get_config_notifier(vdev);
+    virtio_pci_one_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n);
  }
static void virtio_pci_vector_poll(PCIDevice *dev,
@@ -921,19 +957,17 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
      int queue_no;
      unsigned int vector;
      EventNotifier *notifier;
-    VirtQueue *vq;
-
-    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
-        if (!virtio_queue_get_num(vdev, queue_no)) {
+    int ret;
+    for (queue_no = VIRTIO_CONFIG_IRQ_IDX;
+            queue_no < proxy->nvqs_with_notifiers; queue_no++) {
+        ret = virtio_pci_get_notifier(proxy, queue_no, &notifier, &vector);
+        if (ret < 0) {
              break;
          }
-        vector = virtio_queue_vector(vdev, queue_no);
          if (vector < vector_start || vector >= vector_end ||
              !msix_is_masked(dev, vector)) {
              continue;
          }
-        vq = virtio_get_queue(vdev, queue_no);
-        notifier = virtio_queue_get_guest_notifier(vq);
          if (k->guest_notifier_pending) {
              if (k->guest_notifier_pending(vdev, queue_no)) {
                  msix_set_pending(dev, vector);
@@ -1002,6 +1036,7 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
          msix_unset_vector_notifiers(&proxy->pci_dev);
          if (proxy->vector_irqfd) {
              kvm_virtio_pci_vector_release(proxy, nvqs);
+            kvm_virtio_pci_vector_config_release(proxy);
              g_free(proxy->vector_irqfd);
              proxy->vector_irqfd = NULL;
          }
@@ -1029,6 +1064,10 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
                  goto assign_error;
              }
          }
+        r = virtio_pci_set_config_notifier(d, assign);
+        if (r < 0) {
+            goto config_error;
+        }
          r = msix_set_vector_notifiers(&proxy->pci_dev,
                                        virtio_pci_vector_unmask,
                                        virtio_pci_vector_mask,
@@ -1045,7 +1084,8 @@ notifiers_error:
          assert(assign);
          kvm_virtio_pci_vector_release(proxy, nvqs);
      }
-
+config_error:
+    kvm_virtio_pci_vector_config_release(proxy);
  assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
      assert(assign);



