Re: [Qemu-devel] [PATCH] vhost-net: cleanup host notifiers at last step


From: Michael S. Tsirkin
Subject: Re: [Qemu-devel] [PATCH] vhost-net: cleanup host notifiers at last step
Date: Sun, 28 Aug 2011 14:28:05 +0300
User-agent: Mutt/1.5.21 (2010-09-15)

On Fri, Aug 26, 2011 at 01:29:23PM -0500, Ryan Harper wrote:
> * Michael S. Tsirkin <address@hidden> [2011-08-25 05:11]:
> > When the vhost notifier is disabled, the userspace handler runs
> > immediately: virtio_pci_set_host_notifier_internal might
> > call virtio_queue_notify_vq.
> > Since the VQ state and the tap backend state aren't
> > recovered yet, this causes
> > "Guest moved used index from XXX to YYY" assertions.
> 
> When do we see this message?  I'm just wondering how we can test this?

You want vhost_net_stop to be called while the guest is
doing things with the VQ. The specific crash was
observed on vm stop, but I think link down
on the tap device while vhost is active
might be the best way to trigger this.
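
Untested, but something like this over QMP should exercise that path
(assuming the vhost tap backend was created with id "hostnet0", and the
guest keeps pushing traffic through the VQ while the link is toggled):

  { "execute": "qmp_capabilities" }
  { "execute": "set_link", "arguments": { "name": "hostnet0", "up": false } }
  { "execute": "set_link", "arguments": { "name": "hostnet0", "up": true } }

Before this patch you'd expect the "Guest moved used index" assertion
to fire on the link-down (the notifier is torn down before the VQ state
is restored); with it the toggle should be clean.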


> > 
> > The solution is to split out host notifier handling
> > from vhost VQ setup and disable notifiers as our last step
> > when we stop vhost-net. For symmetry enable them first thing
> > on start.
> > 
> > Signed-off-by: Michael S. Tsirkin <address@hidden>
> > ---
> >  hw/vhost.c     |   74 +++++++++++++++++++++++++++++++++++++++++--------------
> >  hw/vhost.h     |    2 +
> >  hw/vhost_net.c |   16 ++++++++++--
> >  3 files changed, 70 insertions(+), 22 deletions(-)
> > 
> > diff --git a/hw/vhost.c b/hw/vhost.c
> > index 1886067..0870cb7 100644
> > --- a/hw/vhost.c
> > +++ b/hw/vhost.c
> > @@ -515,11 +515,6 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
> >      };
> >      struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
> > 
> > -    if (!vdev->binding->set_host_notifier) {
> > -        fprintf(stderr, "binding does not support host notifiers\n");
> > -        return -ENOSYS;
> > -    }
> > -
> >      vq->num = state.num = virtio_queue_get_num(vdev, idx);
> >      r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
> >      if (r) {
> > @@ -567,12 +562,6 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
> >          r = -errno;
> >          goto fail_alloc;
> >      }
> > -    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, true);
> > -    if (r < 0) {
> > -        fprintf(stderr, "Error binding host notifier: %d\n", -r);
> > -        goto fail_host_notifier;
> > -    }
> > -
> >      file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
> >      r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
> >      if (r) {
> > @@ -591,8 +580,6 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
> > 
> >  fail_call:
> >  fail_kick:
> > -    vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
> > -fail_host_notifier:
> >  fail_alloc:
> >      cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
> >                                0, 0);
> > @@ -618,12 +605,6 @@ static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
> >          .index = idx,
> >      };
> >      int r;
> > -    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
> > -    if (r < 0) {
> > -        fprintf(stderr, "vhost VQ %d host cleanup failed: %d\n", idx, r);
> > -        fflush(stderr);
> > -    }
> > -    assert (r >= 0);
> >      r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
> >      if (r < 0) {
> >          fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
> > @@ -697,6 +678,60 @@ bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
> >          hdev->force;
> >  }
> > 
> > +/* Stop processing guest IO notifications in qemu.
> > + * Start processing them in vhost in kernel.
> > + */
> > +int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
> > +{
> > +    int i, r;
> > +    if (!vdev->binding->set_host_notifier) {
> > +        fprintf(stderr, "binding does not support host notifiers\n");
> > +        r = -ENOSYS;
> > +        goto fail;
> > +    }
> > +
> > +    for (i = 0; i < hdev->nvqs; ++i) {
> > +        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, true);
> > +        if (r < 0) {
> > +            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
> > +            goto fail_vq;
> > +        }
> > +    }
> > +
> > +    return 0;
> > +fail_vq:
> > +    while (--i >= 0) {
> > +        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
> > +        if (r < 0) {
> > +            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
> > +            fflush(stderr);
> > +        }
> > +        assert (r >= 0);
> > +    }
> > +fail:
> > +    return r;
> > +}
> > +
> > +/* Stop processing guest IO notifications in vhost.
> > + * Start processing them in qemu.
> > + * This might actually run the qemu handlers right away,
> > + * so virtio in qemu must be completely setup when this is called.
> > + */
> > +void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
> > +{
> > +    int i, r;
> > +
> > +    for (i = 0; i < hdev->nvqs; ++i) {
> > +        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
> > +        if (r < 0) {
> > +            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
> > +            fflush(stderr);
> > +        }
> > +        assert (r >= 0);
> > +    }
> > +}
> > +
> > +/* Host notifiers must be enabled at this point. */
> >  int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
> >  {
> >      int i, r;
> > @@ -762,6 +797,7 @@ fail:
> >      return r;
> >  }
> > 
> > +/* Host notifiers must be enabled at this point. */
> >  void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
> >  {
> >      int i, r;
> > diff --git a/hw/vhost.h b/hw/vhost.h
> > index c8c595a..c9452f0 100644
> > --- a/hw/vhost.h
> > +++ b/hw/vhost.h
> > @@ -46,5 +46,7 @@ void vhost_dev_cleanup(struct vhost_dev *hdev);
> >  bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev);
> >  int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
> >  void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
> > +int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
> > +void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
> > 
> >  #endif
> > diff --git a/hw/vhost_net.c b/hw/vhost_net.c
> > index a559812..950a6b8 100644
> > --- a/hw/vhost_net.c
> > +++ b/hw/vhost_net.c
> > @@ -139,16 +139,22 @@ int vhost_net_start(struct vhost_net *net,
> >  {
> >      struct vhost_vring_file file = { };
> >      int r;
> > +
> > +    net->dev.nvqs = 2;
> > +    net->dev.vqs = net->vqs;
> > +
> > +    r = vhost_dev_enable_notifiers(&net->dev, dev);
> > +    if (r < 0) {
> > +        goto fail_notifiers;
> > +    }
> >      if (net->dev.acked_features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
> >          tap_set_vnet_hdr_len(net->vc,
> >                               sizeof(struct virtio_net_hdr_mrg_rxbuf));
> >      }
> > 
> > -    net->dev.nvqs = 2;
> > -    net->dev.vqs = net->vqs;
> >      r = vhost_dev_start(&net->dev, dev);
> >      if (r < 0) {
> > -        return r;
> > +        goto fail_start;
> >      }
> > 
> >      net->vc->info->poll(net->vc, false);
> > @@ -173,6 +179,9 @@ fail:
> >      if (net->dev.acked_features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
> >          tap_set_vnet_hdr_len(net->vc, sizeof(struct virtio_net_hdr));
> >      }
> > +fail_start:
> > +    vhost_dev_disable_notifiers(&net->dev, dev);
> > +fail_notifiers:
> >      return r;
> >  }
> > 
> > @@ -190,6 +199,7 @@ void vhost_net_stop(struct vhost_net *net,
> >      if (net->dev.acked_features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
> >          tap_set_vnet_hdr_len(net->vc, sizeof(struct virtio_net_hdr));
> >      }
> > +    vhost_dev_disable_notifiers(&net->dev, dev);
> >  }
> > 
> >  void vhost_net_cleanup(struct vhost_net *net)
> > -- 
> > 1.7.5.53.gc233e
> 
> -- 
> Ryan Harper
> Software Engineer; Linux Technology Center
> IBM Corp., Austin, Tx
> address@hidden


