Re: [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs


From: Andrew Jones
Subject: Re: [Qemu-devel] [PATCH v2 2/2] ivshmem: use irqfd to interrupt among VMs
Date: Wed, 5 Dec 2012 04:25:02 -0500 (EST)


----- Original Message -----
> On Tue, Dec 4, 2012 at 4:10 AM, Andrew Jones <address@hidden> wrote:
> >
> >
> > ----- Original Message -----
> >> On Thu, Nov 29, 2012 at 1:34 AM, liu ping fan <address@hidden> wrote:
> >> > On Thu, Nov 29, 2012 at 12:42 PM, Cam Macdonell <address@hidden> wrote:
> >> >> On Tue, Nov 27, 2012 at 7:53 PM, liu ping fan <address@hidden> wrote:
> >> >>> On Wed, Nov 28, 2012 at 5:48 AM, Cam Macdonell <address@hidden> wrote:
> >> >>>> On Sat, Nov 24, 2012 at 8:51 PM, Liu Ping Fan <address@hidden> wrote:
> >> >>>>> From: Liu Ping Fan <address@hidden>
> >> >>>>>
> >> >>>>> Using irqfd, we can avoid switches between kernel and user space
> >> >>>>> when VMs interrupt each other.
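
For context, the fast path added here relies on KVM's irqfd mechanism: an
eventfd is bound in the kernel to a guest interrupt (GSI), so signalling
the eventfd injects the interrupt without exiting to QEMU. A minimal
sketch of the KVM_IRQFD ioctl that QEMU's kvm_irqchip_add_irqfd_notifier()
builds on; vm_fd, efd and gsi are placeholder values for illustration:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Bind eventfd 'efd' to guest interrupt 'gsi' on the VM described by
     * 'vm_fd'; afterwards, writing to the eventfd injects the interrupt
     * directly from the kernel. */
    static int bind_irqfd(int vm_fd, int efd, unsigned int gsi)
    {
        struct kvm_irqfd irqfd = {
            .fd  = efd,   /* eventfd signalled by the peer VM */
            .gsi = gsi,   /* guest interrupt (MSI route) to inject */
        };
        return ioctl(vm_fd, KVM_IRQFD, &irqfd);   /* 0 on success */
    }
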
> >> >>>>
> >> >>>> Nice work.  Due to a hardware failure, there will be a small delay
> >> >>>> in me being able to test this.  I'll follow up as soon as I can.
> >> >>>>
> >> >>> BTW, where can I find the latest guest code for testing?
> >> >>> I got the guest code from git://gitorious.org/nahanni/guest-code.git,
> >> >>> but it seems outdated. After fixing the unlocked_ioctl and the vm-id
> >> >>> bit position (the code conflicts with the ivshmem spec), it works
> >> >>> (I have tested it for v1).
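
For reference, the ivshmem spec puts the destination peer id in the upper
16 bits of the Doorbell register and the vector number in the lower 16
bits, which is the layout the guest driver has to match. A minimal sketch
of ringing a peer from a Linux guest driver; bar0 stands for the mapped
BAR0 registers and the function name is illustrative:

    #include <linux/io.h>
    #include <linux/types.h>

    /* BAR0 register offsets per the ivshmem device spec */
    #define IVSHMEM_INTR_MASK    0x00
    #define IVSHMEM_INTR_STATUS  0x04
    #define IVSHMEM_IV_POSITION  0x08   /* this VM's own id */
    #define IVSHMEM_DOORBELL     0x0c

    static void ivshmem_ring_peer(void __iomem *bar0, u16 peer_id, u16 vector)
    {
        /* high 16 bits: peer to notify, low 16 bits: vector to trigger */
        writel(((u32)peer_id << 16) | vector, bar0 + IVSHMEM_DOORBELL);
    }
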
> >> >>
> >> >> Hello,
> >> >>
> >> >> Which device driver are you using?
> >> >>
> >> > guest-code/kernel_module/standard/kvm_ivshmem.c
> >>
> >> The uio driver is the recommended one; however, if you want to use the
> >> kvm_ivshmem one and have it working, feel free to continue.
> >
> > If the uio driver is the recommended one, then can you please post it
> > to lkml? It should be integrated into drivers/virt with an appropriate
> > Kconfig update.
> >
> 
> Sure.  Should it go under drivers/virt or drivers/uio?  It seems the
> uio drivers all get grouped together.

Good point. That is the current practice. As there's still only a
handful of uio drivers, I guess it doesn't make sense to try to change
that at this point. It would make more sense to use drivers/uio just for
generic uio drivers, though, and have the other uio drivers go under
drivers/<type>/uio, e.g. drivers/virt/uio.

Drew
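
As an illustration of the Kconfig update mentioned above, a minimal sketch
of how such a driver could be wired in under a hypothetical
drivers/virt/uio/ directory; the config symbol and file names are
assumptions, not taken from the posted driver:

    # drivers/virt/uio/Kconfig
    config UIO_IVSHMEM
            tristate "UIO driver for the ivshmem PCI device"
            depends on UIO && PCI
            help
              Userspace I/O driver exposing the ivshmem shared-memory BAR
              and its doorbell interrupts to userspace.

    # drivers/virt/uio/Makefile
    obj-$(CONFIG_UIO_IVSHMEM) += uio_ivshmem.o
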

> 
> Thanks,
> Cam
> 
> > Thanks,
> > Drew
> >
> >>
> >> I had deleted it from the repo, but some users had based solutions off
> >> it, so I added it back.
> >>
> >> btw, my hardware issue has been resolved, so I'll get to testing your
> >> patch soon.
> >>
> >> Sincerely,
> >> Cam
> >>
> >> >
> >> >> Cam
> >> >>
> >> >>>
> >> >>> Regards,
> >> >>> Pingfan
> >> >>>
> >> >>>>>
> >> >>>>> Signed-off-by: Liu Ping Fan <address@hidden>
> >> >>>>> ---
> >> >>>>>  hw/ivshmem.c |   54 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
> >> >>>>>  1 files changed, 53 insertions(+), 1 deletions(-)
> >> >>>>>
> >> >>>>> diff --git a/hw/ivshmem.c b/hw/ivshmem.c
> >> >>>>> index 7c8630c..5709e89 100644
> >> >>>>> --- a/hw/ivshmem.c
> >> >>>>> +++ b/hw/ivshmem.c
> >> >>>>> @@ -19,6 +19,7 @@
> >> >>>>>  #include "hw.h"
> >> >>>>>  #include "pc.h"
> >> >>>>>  #include "pci.h"
> >> >>>>> +#include "msi.h"
> >> >>>>>  #include "msix.h"
> >> >>>>>  #include "kvm.h"
> >> >>>>>  #include "migration.h"
> >> >>>>> @@ -83,6 +84,7 @@ typedef struct IVShmemState {
> >> >>>>>      uint32_t vectors;
> >> >>>>>      uint32_t features;
> >> >>>>>      EventfdEntry *eventfd_table;
> >> >>>>> +    int *vector_virqs;
> >> >>>>>
> >> >>>>>      Error *migration_blocker;
> >> >>>>>
> >> >>>>> @@ -625,16 +627,62 @@ static int ivshmem_load(QEMUFile* f, void *opaque, int version_id)
> >> >>>>>      return 0;
> >> >>>>>  }
> >> >>>>>
> >> >>>>> +static int ivshmem_vector_use(PCIDevice *dev, unsigned vector,
> >> >>>>> +                                     MSIMessage msg)
> >> >>>>> +{
> >> >>>>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
> >> >>>>> +    int virq;
> >> >>>>> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
> >> >>>>> +
> >> >>>>> +    virq = kvm_irqchip_add_msi_route(kvm_state, msg);
> >> >>>>> +    if (virq >= 0 && kvm_irqchip_add_irqfd_notifier(kvm_state, n, virq) >= 0) {
> >> >>>>> +        s->vector_virqs[vector] = virq;
> >> >>>>> +        qemu_chr_add_handlers(s->eventfd_chr[vector], NULL, NULL, NULL, NULL);
> >> >>>>> +    } else if (virq >= 0) {
> >> >>>>> +        kvm_irqchip_release_virq(kvm_state, virq);
> >> >>>>> +        error_report("ivshmem, can not setup irqfd\n");
> >> >>>>> +        return -1;
> >> >>>>> +    } else {
> >> >>>>> +        error_report("ivshmem, no enough msi route to setup irqfd\n");
> >> >>>>> +        return -1;
> >> >>>>> +    }
> >> >>>>> +
> >> >>>>> +    return 0;
> >> >>>>> +}
> >> >>>>> +
> >> >>>>> +static void ivshmem_vector_release(PCIDevice *dev, unsigned vector)
> >> >>>>> +{
> >> >>>>> +    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
> >> >>>>> +    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
> >> >>>>> +    int virq = s->vector_virqs[vector];
> >> >>>>> +
> >> >>>>> +    if (s->vector_virqs[vector] >= 0) {
> >> >>>>> +        kvm_irqchip_remove_irqfd_notifier(kvm_state, n, virq);
> >> >>>>> +        kvm_irqchip_release_virq(kvm_state, virq);
> >> >>>>> +        s->vector_virqs[vector] = -1;
> >> >>>>> +    }
> >> >>>>> +}
> >> >>>>> +
> >> >>>>>  static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
> >> >>>>>                                  uint32_t val, int len)
> >> >>>>>  {
> >> >>>>> +    bool is_enabled, was_enabled = msi_enabled(pci_dev);
> >> >>>>> +
> >> >>>>>      pci_default_write_config(pci_dev, address, val, len);
> >> >>>>> +    is_enabled = msi_enabled(pci_dev);
> >> >>>>> +    if (!was_enabled && is_enabled) {
> >> >>>>> +        msix_set_vector_notifiers(pci_dev, ivshmem_vector_use,
> >> >>>>> +            ivshmem_vector_release);
> >> >>>>> +    } else if (was_enabled && !is_enabled) {
> >> >>>>> +        msix_unset_vector_notifiers(pci_dev);
> >> >>>>> +    }
> >> >>>>>  }
> >> >>>>>
> >> >>>>>  static int pci_ivshmem_init(PCIDevice *dev)
> >> >>>>>  {
> >> >>>>>      IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
> >> >>>>>      uint8_t *pci_conf;
> >> >>>>> +    int i;
> >> >>>>>
> >> >>>>>      if (s->sizearg == NULL)
> >> >>>>>          s->ivshmem_size = 4 << 20; /* 4 MB default */
> >> >>>>> @@ -758,7 +806,10 @@ static int pci_ivshmem_init(PCIDevice *dev)
> >> >>>>>      }
> >> >>>>>
> >> >>>>>      s->dev.config_write = ivshmem_write_config;
> >> >>>>> -
> >> >>>>> +    s->vector_virqs = g_new0(int, s->vectors);
> >> >>>>> +    for (i = 0; i < s->vectors; i++) {
> >> >>>>> +        s->vector_virqs[i] = -1;
> >> >>>>> +    }
> >> >>>>>      return 0;
> >> >>>>>  }
> >> >>>>>
> >> >>>>> @@ -770,6 +821,7 @@ static void pci_ivshmem_uninit(PCIDevice *dev)
> >> >>>>>          migrate_del_blocker(s->migration_blocker);
> >> >>>>>          error_free(s->migration_blocker);
> >> >>>>>      }
> >> >>>>> +    g_free(s->vector_virqs);
> >> >>>>>
> >> >>>>>      memory_region_destroy(&s->ivshmem_mmio);
> >> >>>>>      memory_region_del_subregion(&s->bar, &s->ivshmem);
> >> >>>>> --
> >> >>>>> 1.7.4.4
> >> >>>>>
> >> >>>
> >> >
> >>
> >>
> >
> 
> 


