From: Maxime Coquelin
Subject: Re: [Qemu-devel] [PATCH v1 13/16] virtio: add vhost-net migration of packed ring
Date: Wed, 28 Nov 2018 11:05:07 +0100
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Thunderbird/60.2.1



On 11/22/18 3:06 PM, address@hidden wrote:
From: Wei Xu <address@hidden>

Tweaked the vhost-net kernel code to test migration.

@@ -1414,64 +1430,20 @@ long vhost_vring_ioctl(struct vhost_dev
             r = -EFAULT;
             break;
         }
+               vq->last_avail_idx = s.num & 0x7FFF;
+               /* Forget the cached index value. */
+               vq->avail_idx = vq->last_avail_idx;
+               if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+                       vq->last_avail_wrap_counter = !!(s.num & 0x8000);
+                       vq->avail_wrap_counter = vq->last_avail_wrap_counter;
+
+                       vq->last_used_idx = (s.num & 0x7fff0000) >> 16;
+                       vq->last_used_wrap_counter = !!(s.num & 0x80000000);
+               }
+               break;
+       case VHOST_GET_VRING_BASE:
+               s.index = idx;
+               s.num = vq->last_avail_idx;
+               if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+                       s.num |= vq->last_avail_wrap_counter << 15;
+                       s.num |= vq->last_used_idx << 16;
+                       s.num |= vq->last_used_wrap_counter << 31;

We agreed to only have the avail idx and its wrap counter for vhost-user;
I guess we should have the same for the kernel backend?

This is what we have for the vhost-user backend:
bit[0:14] last_avail_idx
bit[15] last_avail_wrap_counter
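
For illustration, a minimal sketch of that 16-bit encoding (hypothetical
helper names, not part of the patch or of the vhost-user code):

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the vhost-user packed-ring base encoding described above:
 * bit[0:14] last_avail_idx, bit[15] last_avail_wrap_counter.
 */
static inline uint16_t pack_vring_base(uint16_t last_avail_idx,
                                       bool last_avail_wrap_counter)
{
    return (last_avail_idx & 0x7fff) |
           ((uint16_t)last_avail_wrap_counter << 15);
}

static inline void unpack_vring_base(uint16_t num,
                                     uint16_t *last_avail_idx,
                                     bool *last_avail_wrap_counter)
{
    *last_avail_idx = num & 0x7fff;
    *last_avail_wrap_counter = !!(num & 0x8000);
}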

+               }
+               if (copy_to_user(argp, &s, sizeof(s)))
+                       r = -EFAULT;
+               break;

Signed-off-by: Wei Xu <address@hidden>
---
  hw/virtio/virtio.c         | 35 ++++++++++++++++++++++++++++++-----
  include/hw/virtio/virtio.h |  4 ++--
  2 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 64d5c04..7487d3d 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -2963,19 +2963,40 @@ hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
      }
  }
-uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
+int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
  {
-    return vdev->vq[n].last_avail_idx;
+    int idx;
+
+    if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        idx = vdev->vq[n].last_avail_idx;
+        idx |= ((int)vdev->vq[n].avail_wrap_counter) << 15;
+        idx |= (vdev->vq[n].used_idx) << 16;
+        idx |= ((int)vdev->vq[n].used_wrap_counter) << 31;
+    } else {
+        idx = (int)vdev->vq[n].last_avail_idx;
+    }
+    return idx;
  }
-void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
+void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, int idx)
  {
-    vdev->vq[n].last_avail_idx = idx;
-    vdev->vq[n].shadow_avail_idx = idx;
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        vdev->vq[n].last_avail_idx = idx & 0x7fff;
+        vdev->vq[n].avail_wrap_counter = !!(idx & 0x8000);
+        vdev->vq[n].used_idx = (idx & 0x7fff0000) >> 16;
+        vdev->vq[n].used_wrap_counter = !!(idx & 0x80000000);
+    } else {
+        vdev->vq[n].last_avail_idx = idx;
+        vdev->vq[n].shadow_avail_idx = idx;
+    }
  }
void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
  {
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        return;
+    }
+
      rcu_read_lock();
      if (vdev->vq[n].vring.desc) {
          vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
@@ -2986,6 +3007,10 @@ void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
  {
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        return;
+    }
+
      rcu_read_lock();
      if (vdev->vq[n].vring.desc) {
          vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 9c1fa07..a6fdf3f 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -272,8 +272,8 @@ hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n);
  hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n);
  hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n);
  hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n);
-uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
-void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx);
+int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
+void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, int idx);
  void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n);
  void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n);
  void virtio_queue_update_used_idx(VirtIODevice *vdev, int n);
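
For reference, the full 32-bit layout the patch migrates through
virtio_queue_get_last_avail_idx()/virtio_queue_set_last_avail_idx() can be
summarized with a standalone sketch (hypothetical helpers, not QEMU code;
a uint32_t is used here so the bit[31] handling stays unsigned):

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the packed-ring state word used by the patch:
 * bit[0:14]  last_avail_idx
 * bit[15]    avail_wrap_counter
 * bit[16:30] used_idx
 * bit[31]    used_wrap_counter
 */
struct packed_vq_state {
    uint16_t last_avail_idx;
    bool avail_wrap_counter;
    uint16_t used_idx;
    bool used_wrap_counter;
};

static inline uint32_t pack_vq_state(const struct packed_vq_state *s)
{
    return (uint32_t)(s->last_avail_idx & 0x7fff) |
           ((uint32_t)s->avail_wrap_counter << 15) |
           ((uint32_t)(s->used_idx & 0x7fff) << 16) |
           ((uint32_t)s->used_wrap_counter << 31);
}

static inline void unpack_vq_state(uint32_t num, struct packed_vq_state *s)
{
    s->last_avail_idx = num & 0x7fff;
    s->avail_wrap_counter = !!(num & 0x8000);
    s->used_idx = (num >> 16) & 0x7fff;
    s->used_wrap_counter = !!(num & 0x80000000u);
}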



