From: Jason Wang
Subject: [Qemu-devel] [PATCH V5 15/18] virtio-pci: speedup MSI-X masking and unmasking
Date: Wed, 1 Apr 2015 16:15:09 +0800

This patch speeds up MSI-X masking and unmasking through the mapping
between vectors and queues. With this patch there is no need to go
through all possible virtqueues, which helps reduce the time spent
masking or unmasking a single vector when hundreds or even thousands
of virtqueues are supported.
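
The core idea can be shown with a small standalone sketch (the struct
and helper names below are made up for illustration and are not the
QEMU data structures): each MSI-X vector keeps a list of the
virtqueues bound to it, so masking walks only that list instead of
scanning every possible queue.

    /*
     * Toy illustration only -- not the QEMU APIs.  Each vector keeps
     * the head of a singly linked list of the virtqueues bound to it,
     * so masking a vector walks just that list.
     */
    #include <stdio.h>

    #define NUM_QUEUES  8
    #define NUM_VECTORS 4

    struct toy_vq {
        int index;                       /* queue number */
        int vector;                      /* MSI-X vector of this queue */
        struct toy_vq *next_for_vector;  /* next queue on the same vector */
    };

    static struct toy_vq queues[NUM_QUEUES];
    static struct toy_vq *vector_head[NUM_VECTORS]; /* first queue per vector */

    /* Bind a queue to a vector and link it into that vector's list. */
    static void bind_queue(int index, int vector)
    {
        queues[index].index = index;
        queues[index].vector = vector;
        queues[index].next_for_vector = vector_head[vector];
        vector_head[vector] = &queues[index];
    }

    /* Mask every queue bound to the vector: O(queues per vector),
     * not O(all queues) as a full scan would be. */
    static void mask_vector(int vector)
    {
        struct toy_vq *vq;

        for (vq = vector_head[vector]; vq; vq = vq->next_for_vector) {
            printf("mask notifier of queue %d (vector %d)\n",
                   vq->index, vector);
        }
    }

    int main(void)
    {
        int i;

        for (i = 0; i < NUM_QUEUES; i++) {
            bind_queue(i, i % NUM_VECTORS); /* spread queues across vectors */
        }
        mask_vector(1); /* visits only queues 1 and 5 in this toy setup */
        return 0;
    }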

Tested with an 80 queue pair virtio-net-pci device by changing the smp
affinity in the background while running netperf at the same time:

Before the patch:
5711.70 Gbits/sec
After the patch:
6830.98 Gbits/sec

About a 19.6% improvement in throughput.

Cc: Michael S. Tsirkin <address@hidden>
Signed-off-by: Jason Wang <address@hidden>
---
 hw/virtio/virtio-pci.c | 40 +++++++++++++++++++++-------------------
 1 file changed, 21 insertions(+), 19 deletions(-)

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index c38f33f..9a5242a 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -632,28 +632,30 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
 {
     VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
-    int ret, queue_no;
+    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
+    int ret, index, unmasked = 0;
 
-    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
-        if (!virtio_queue_get_num(vdev, queue_no)) {
+    while (vq) {
+        index = virtio_queue_get_index(vdev, vq);
+        if (!virtio_queue_get_num(vdev, index)) {
             break;
         }
-        if (virtio_queue_vector(vdev, queue_no) != vector) {
-            continue;
-        }
-        ret = virtio_pci_vq_vector_unmask(proxy, queue_no, vector, msg);
+        ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
         if (ret < 0) {
             goto undo;
         }
+        vq = virtio_vector_next_queue(vq);
+        ++unmasked;
     }
+
     return 0;
 
 undo:
-    while (--queue_no >= 0) {
-        if (virtio_queue_vector(vdev, queue_no) != vector) {
-            continue;
-        }
-        virtio_pci_vq_vector_mask(proxy, queue_no, vector);
+    vq = virtio_vector_first_queue(vdev, vector);
+    while (vq && --unmasked >= 0) {
+        index = virtio_queue_get_index(vdev, vq);
+        virtio_pci_vq_vector_mask(proxy, index, vector);
+        vq = virtio_vector_next_queue(vq);
     }
     return ret;
 }
@@ -662,16 +664,16 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
 {
     VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
-    int queue_no;
+    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
+    int index;
 
-    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
-        if (!virtio_queue_get_num(vdev, queue_no)) {
+    while (vq) {
+        index = virtio_queue_get_index(vdev, vq);
+        if (!virtio_queue_get_num(vdev, index)) {
             break;
         }
-        if (virtio_queue_vector(vdev, queue_no) != vector) {
-            continue;
-        }
-        virtio_pci_vq_vector_mask(proxy, queue_no, vector);
+        virtio_pci_vq_vector_mask(proxy, index, vector);
+        vq = virtio_vector_next_queue(vq);
     }
 }
 
-- 
2.1.0



