From: Michael S. Tsirkin
Subject: Re: [PULL v3 12/55] virtio-pci: add support for configure interrupt
Date: Sun, 9 Jan 2022 10:54:46 -0500

On Sun, Jan 09, 2022 at 02:33:43PM +0100, Cédric Le Goater wrote:
> Hello Cindy,
> 
> On 1/8/22 02:04, Michael S. Tsirkin wrote:
> > From: Cindy Lu <lulu@redhat.com>
> > 
> > Add support for the configure interrupt. The process uses kvm_irqfd_assign
> > to set the gsi in the kernel. When the configure notifier is signaled by
> > the host, QEMU injects an MSI-X interrupt into the guest.
> > 
> > Signed-off-by: Cindy Lu <lulu@redhat.com>
> > Message-Id: <20211104164827.21911-11-lulu@redhat.com>
> > Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
> > Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
> > ---
> >   hw/virtio/virtio-pci.h |  4 +-
> >   hw/virtio/virtio-pci.c | 92 ++++++++++++++++++++++++++++++++++++------
> >   2 files changed, 83 insertions(+), 13 deletions(-)
> > 
> > diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
> > index 2446dcd9ae..b704acc5a8 100644
> > --- a/hw/virtio/virtio-pci.h
> > +++ b/hw/virtio/virtio-pci.h
> > @@ -251,5 +251,7 @@ void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t);
> >    * @fixed_queues.
> >    */
> >   unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues);
> > -
> > +void virtio_pci_set_guest_notifier_fd_handler(VirtIODevice *vdev, VirtQueue *vq,
> > +                                              int n, bool assign,
> > +                                              bool with_irqfd);
> >   #endif
> > diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> > index 7201cf3dc1..98fb5493ae 100644
> > --- a/hw/virtio/virtio-pci.c
> > +++ b/hw/virtio/virtio-pci.c
> > @@ -727,7 +727,8 @@ static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
> >       VirtQueue *vq;
> >       if (queue_no == VIRTIO_CONFIG_IRQ_IDX) {
> > -        return -1;
> > +        *n = virtio_config_get_guest_notifier(vdev);
> > +        *vector = vdev->config_vector;
> >       } else {
> >           if (!virtio_queue_get_num(vdev, queue_no)) {
> >               return -1;
> > @@ -802,6 +803,10 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
> >       return ret;
> >   }
> > +static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
> > +{
> > +    return kvm_virtio_pci_vector_use_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
> > +}
> >   static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
> >                                                 int queue_no)
> > @@ -839,6 +844,11 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
> >       }
> >   }
> > +static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)
> > +{
> > +    kvm_virtio_pci_vector_release_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
> > +}
> > +
> >   static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy,
> >                                          unsigned int queue_no,
> >                                          unsigned int vector,
> > @@ -920,9 +930,17 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
> >           }
> >           vq = virtio_vector_next_queue(vq);
> >       }
> > -
> > +    /* unmask config intr */
> > +    n = virtio_config_get_guest_notifier(vdev);
> > +    ret = virtio_pci_one_vector_unmask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector,
> > +                                       msg, n);
> > +    if (ret < 0) {
> > +        goto undo_config;
> > +    }
> >       return 0;
> > -
> > +undo_config:
> > +    n = virtio_config_get_guest_notifier(vdev);
> > +    virtio_pci_one_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n);
> >   undo:
> >       vq = virtio_vector_first_queue(vdev, vector);
> >       while (vq && unmasked >= 0) {
> > @@ -956,6 +974,8 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
> >           }
> >           vq = virtio_vector_next_queue(vq);
> >       }
> > +    n = virtio_config_get_guest_notifier(vdev);
> > +    virtio_pci_one_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n);
> >   }
> >   static void virtio_pci_vector_poll(PCIDevice *dev,
> > @@ -987,6 +1007,34 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
> >               msix_set_pending(dev, vector);
> >           }
> >       }
> > +    /* poll the config intr */
> > +    ret = virtio_pci_get_notifier(proxy, VIRTIO_CONFIG_IRQ_IDX, &notifier,
> > +                                  &vector);
> > +    if (ret < 0) {
> > +        return;
> > +    }
> > +    if (vector < vector_start || vector >= vector_end ||
> > +        !msix_is_masked(dev, vector)) {
> > +        return;
> > +    }
> > +    if (k->guest_notifier_pending) {
> > +        if (k->guest_notifier_pending(vdev, VIRTIO_CONFIG_IRQ_IDX)) {
> > +            msix_set_pending(dev, vector);
> > +        }
> > +    } else if (event_notifier_test_and_clear(notifier)) {
> > +        msix_set_pending(dev, vector);
> > +    }
> > +}
> > +
> > +void virtio_pci_set_guest_notifier_fd_handler(VirtIODevice *vdev, VirtQueue *vq,
> > +                                              int n, bool assign,
> > +                                              bool with_irqfd)
> > +{
> > +    if (n == VIRTIO_CONFIG_IRQ_IDX) {
> > +        virtio_config_set_guest_notifier_fd_handler(vdev, assign, with_irqfd);
> > +    } else {
> > +        virtio_queue_set_guest_notifier_fd_handler(vq, assign, with_irqfd);
> > +    }
> >   }
> >   static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
> > @@ -995,17 +1043,25 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
> >       VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
> >       VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> >       VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
> > -    VirtQueue *vq = virtio_get_queue(vdev, n);
> > -    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
> > +    VirtQueue *vq = NULL;
> > +    EventNotifier *notifier = NULL;
> > +
> > +    if (n == VIRTIO_CONFIG_IRQ_IDX) {
> > +        notifier = virtio_config_get_guest_notifier(vdev);
> > +    } else {
> > +        vq = virtio_get_queue(vdev, n);
> > +        notifier = virtio_queue_get_guest_notifier(vq);
> > +    }
> >       if (assign) {
> >           int r = event_notifier_init(notifier, 0);
> >           if (r < 0) {
> >               return r;
> >           }
> > -        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
> > +        virtio_pci_set_guest_notifier_fd_handler(vdev, vq, n, true, with_irqfd);
> >       } else {
> > -        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
> > +        virtio_pci_set_guest_notifier_fd_handler(vdev, vq, n, false,
> > +                                                 with_irqfd);
> >           event_notifier_cleanup(notifier);
> >       }
> > @@ -1047,6 +1103,7 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
> >           msix_unset_vector_notifiers(&proxy->pci_dev);
> >           if (proxy->vector_irqfd) {
> >               kvm_virtio_pci_vector_release(proxy, nvqs);
> > +            kvm_virtio_pci_vector_config_release(proxy);
> >               g_free(proxy->vector_irqfd);
> >               proxy->vector_irqfd = NULL;
> >           }
> > @@ -1062,7 +1119,11 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
> >               goto assign_error;
> >           }
> >       }
> > -
> > +    r = virtio_pci_set_guest_notifier(d, VIRTIO_CONFIG_IRQ_IDX, assign,
> > +                                      with_irqfd);
> > +    if (r < 0) {
> > +        goto config_assign_error;
> > +    }
> >       /* Must set vector notifier after guest notifier has been assigned */
> >       if ((with_irqfd || k->guest_notifier_mask) && assign) {
> >           if (with_irqfd) {
> > @@ -1071,11 +1132,14 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
> >                             msix_nr_vectors_allocated(&proxy->pci_dev));
> >               r = kvm_virtio_pci_vector_use(proxy, nvqs);
> >               if (r < 0) {
> > -                goto assign_error;
> > +                goto config_assign_error;
> >               }
> >           }
> > -        r = msix_set_vector_notifiers(&proxy->pci_dev,
> > -                                      virtio_pci_vector_unmask,
> > +        r = kvm_virtio_pci_vector_config_use(proxy);
> > +        if (r < 0) {
> > +            goto config_error;
> > +        }
> 
> 
> This is crashing a QEMU TCG machine using vhost (no irqfd). Below is a fix,
> but I doubt it is complete.
> 
> Thanks,
> 
> C.
> 
> 
> From dde4d7b21c851a33d2d03bddd18464ae4e777a3f Mon Sep 17 00:00:00 2001
> From: Cédric Le Goater <clg@kaod.org>
> Date: Sun, 9 Jan 2022 10:40:47 +0100
> Subject: [PATCH] vhost: Fix support for configure interrupt
> MIME-Version: 1.0
> Content-Type: text/plain; charset=UTF-8
> Content-Transfer-Encoding: 8bit
> 
> irqfd is not necessarily in use (TCG using vhost).
> 
> Cc: Cindy Lu <lulu@redhat.com>
> Fixes: d5d24d859c39 ("virtio-pci: add support for configure interrupt")
> Signed-off-by: Cédric Le Goater <clg@kaod.org>
> ---
>  hw/virtio/virtio-pci.c | 10 ++++++----
>  1 file changed, 6 insertions(+), 4 deletions(-)
> 
> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> index 98fb5493ae06..39ebb042860d 100644
> --- a/hw/virtio/virtio-pci.c
> +++ b/hw/virtio/virtio-pci.c
> @@ -1134,10 +1134,12 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
>              if (r < 0) {
>                  goto config_assign_error;
>              }
> -        }
> -        r = kvm_virtio_pci_vector_config_use(proxy);
> -        if (r < 0) {
> -            goto config_error;
> +
> +            r = kvm_virtio_pci_vector_config_use(proxy);
> +            if (r < 0) {
> +                abort();
> +                goto config_error;
> +            }
>          }
>          r = msix_set_vector_notifiers(&proxy->pci_dev, virtio_pci_vector_unmask,
>                                        virtio_pci_vector_mask,
> -- 
> 2.31.1


Yes, we need to fix up cleanup too, but besides that it seems good,
right?

So like this then? Cindy?

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 98fb5493ae..722ce12b45 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -1134,10 +1134,10 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
             if (r < 0) {
                 goto config_assign_error;
             }
-        }
-        r = kvm_virtio_pci_vector_config_use(proxy);
-        if (r < 0) {
-            goto config_error;
+            r = kvm_virtio_pci_vector_config_use(proxy);
+            if (r < 0) {
+                goto config_error;
+            }
         }
         r = msix_set_vector_notifiers(&proxy->pci_dev, virtio_pci_vector_unmask,
                                       virtio_pci_vector_mask,
@@ -1155,7 +1155,9 @@ notifiers_error:
         kvm_virtio_pci_vector_release(proxy, nvqs);
     }
 config_error:
-    kvm_virtio_pci_vector_config_release(proxy);
+    if (with_irqfd) {
+        kvm_virtio_pci_vector_config_release(proxy);
+    }
 config_assign_error:
     virtio_pci_set_guest_notifier(d, VIRTIO_CONFIG_IRQ_IDX, !assign,
                                   with_irqfd);



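That wiring is also why the TCG report above makes sense: every
kvm_irqchip_* call assumes KVM's in-kernel irqchip, and in
virtio_pci_set_guest_notifiers() with_irqfd is computed as
msix_enabled(&proxy->pci_dev) && kvm_msi_via_irqfd_enabled(), which is
always false under TCG. Both the config vector's setup and its release
therefore have to stay inside the with_irqfd branch, as the diff above
does. For symmetry, here is a sketch of the matching teardown, under the
same assumptions as the sketch above (illustrative helper name; virq is
the GSI obtained at setup time):

/*
 * Sketch only: mirrors kvm_virtio_pci_vector_release_one() for the
 * config vector. Must only run when the irqfd was actually assigned.
 */
static void config_irqfd_release_sketch(VirtIOPCIProxy *proxy, int virq)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    EventNotifier *n = virtio_config_get_guest_notifier(vdev);

    /* Detach the eventfd from the GSI, then drop the MSI route. */
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, virq);
    kvm_irqchip_release_virq(kvm_state, virq);
}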
