[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH 3/6] virtio for QEMU (v2)
From: Anthony Liguori
Subject: [Qemu-devel] [PATCH 3/6] virtio for QEMU (v2)
Date: Fri, 4 Apr 2008 23:02:52 -0500
This patch introduces virtio support over PCI. virtio is a generic virtual IO
framework for Linux first introduced in 2.6.23. Since 2.6.25, virtio has
supported a PCI transport which this patch implements.
Since the last time these patches were posted to qemu-devel, I've reworked it
to use the proper access functions to manipulate guest memory.
Since v1, I've updated the patch based on the IOVector refactoring.
Signed-off-by: Anthony Liguori <address@hidden>
diff --git a/Makefile.target b/Makefile.target
index 94f3e58..6815ba8 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -534,6 +534,9 @@ OBJS += pcnet.o
OBJS += rtl8139.o
OBJS += e1000.o
+# virtio devices
+OBJS += virtio.o
+
ifeq ($(TARGET_BASE_ARCH), i386)
# Hardware support
OBJS+= ide.o pckbd.o ps2.o vga.o $(SOUND_HW) dma.o
diff --git a/cutils.c b/cutils.c
index 9ef2fa6..814b3c4 100644
--- a/cutils.c
+++ b/cutils.c
@@ -95,3 +95,14 @@ time_t mktimegm(struct tm *tm)
t += 3600 * tm->tm_hour + 60 * tm->tm_min + tm->tm_sec;
return t;
}
+
+/* Find last (most significant) set bit.
+ * Returns the 1-based index of the highest set bit, or 0 if i == 0,
+ * matching the BSD fls(3) contract.  The mask is built as 1U << bit
+ * because 1 << 31 left-shifts into the sign bit of a signed int,
+ * which is undefined behavior in C. */
+int fls(int i)
+{
+    int bit;
+
+    for (bit = 31; bit >= 0; bit--)
+        if (i & (1U << bit))
+            return bit + 1;
+
+    return 0;
+}
diff --git a/hw/virtio-pci.h b/hw/virtio-pci.h
new file mode 100644
index 0000000..9262e49
--- /dev/null
+++ b/hw/virtio-pci.h
@@ -0,0 +1,65 @@
+/*
+ * Virtio Support
+ *
+ * Copyright IBM, Corp. 2007-2008
+ *
+ * Authors:
+ * Anthony Liguori <address@hidden>
+ * Rusty Russell <address@hidden>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+/* Guard renamed from _VIRTIO_PCI_H: identifiers beginning with an
+ * underscore followed by an uppercase letter are reserved for the
+ * implementation (C99 7.1.3). */
+#ifndef QEMU_VIRTIO_PCI_H
+#define QEMU_VIRTIO_PCI_H
+
+/* from Linux's linux/virtio_ring.h */
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+
+/* This means don't notify other side when buffer added. */
+#define VRING_USED_F_NO_NOTIFY 1
+/* This means don't interrupt guest when buffer consumed. */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+#define VIRTIO_PCI_QUEUE_MAX 16
+
+/* from Linux's linux/virtio_pci.h */
+
+/* A 32-bit r/o bitmask of the features supported by the host */
+#define VIRTIO_PCI_HOST_FEATURES 0
+
+/* A 32-bit r/w bitmask of features activated by the guest */
+#define VIRTIO_PCI_GUEST_FEATURES 4
+
+/* A 32-bit r/w PFN for the currently selected queue */
+#define VIRTIO_PCI_QUEUE_PFN 8
+
+/* A 16-bit r/o queue size for the currently selected queue */
+#define VIRTIO_PCI_QUEUE_NUM 12
+
+/* A 16-bit r/w queue selector */
+#define VIRTIO_PCI_QUEUE_SEL 14
+
+/* A 16-bit r/w queue notifier */
+#define VIRTIO_PCI_QUEUE_NOTIFY 16
+
+/* An 8-bit device status register. */
+#define VIRTIO_PCI_STATUS 18
+
+/* An 8-bit r/o interrupt status register. Reading the value will return the
+ * current contents of the ISR and will also clear it. This is effectively
+ * a read-and-acknowledge. */
+#define VIRTIO_PCI_ISR 19
+
+#define VIRTIO_PCI_CONFIG 20
+
+/* Virtio ABI version, if we increment this, we break the guest driver. */
+#define VIRTIO_PCI_ABI_VERSION 0
+
+#endif
diff --git a/hw/virtio.c b/hw/virtio.c
new file mode 100644
index 0000000..42c5551
--- /dev/null
+++ b/hw/virtio.c
@@ -0,0 +1,594 @@
+/*
+ * Virtio Support
+ *
+ * Copyright IBM, Corp. 2007-2008
+ *
+ * Authors:
+ * Anthony Liguori <address@hidden>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <inttypes.h>
+#include <err.h>
+
+#include "virtio.h"
+#include "sysemu.h"
+#include "virtio-pci.h"
+
+/* Host-side mirror of the guest's descriptor table entry layout;
+ * fields are read from guest-physical memory via the ld*_phys helpers. */
+typedef struct VRingDesc
+{
+ /* Address (guest-physical). */
+ uint64_t addr;
+ /* Length. */
+ uint32_t len;
+ /* The flags as indicated above. */
+ uint16_t flags;
+ /* We chain unused descriptors via this, too */
+ uint16_t next;
+} VRingDesc;
+
+/* Guest-owned ring of available descriptor heads. */
+typedef struct VRingAvail
+{
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[];
+} VRingAvail;
+
+typedef struct VRingUsedElem
+{
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was used (written to) */
+ uint32_t len;
+} VRingUsedElem;
+
+/* Host-owned ring of completed (used) elements. */
+typedef struct VRingUsed
+{
+ uint16_t flags;
+ uint16_t idx;
+ VRingUsedElem ring[];
+} VRingUsed;
+
+/* Guest-physical base addresses of the three ring areas; num is the
+ * queue size in descriptors. */
+typedef struct VRing
+{
+ unsigned int num;
+ target_phys_addr_t desc;
+ target_phys_addr_t avail;
+ target_phys_addr_t used;
+} VRing;
+
+struct VirtQueue
+{
+ VRing vring;
+ uint32_t pfn;
+ /* Last avail ring index the host has consumed. */
+ uint16_t last_avail_idx;
+ void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
+ int index;
+ VirtIODevice *vdev;
+};
+
+/* QEMU doesn't strictly need write barriers since everything runs in
+ * lock-step. We'll leave the calls to wmb() in though to make it obvious for
+ * KVM or if kqemu gets SMP support.
+ */
+
+#define wmb() do { } while (0)
+
+/* FIXME put this somewhere generic */
+#define offsetof(type, member) ((unsigned long)(&((type *)0)->member))
+
+/* virt queue functions */
+
+/* Lay out the ring at guest-physical address p: descriptor table
+ * (16 bytes per entry), then the avail ring (two uint16_t header fields
+ * plus one uint16_t per entry), then the used ring aligned up to the
+ * next target page boundary. */
+static void virtqueue_init(VirtQueue *vq, target_phys_addr_t p)
+{
+ vq->vring.desc = p;
+ vq->vring.avail = p + vq->vring.num * 16;
+ vq->vring.used = vq->vring.avail + 2 * (2 + vq->vring.num);
+ vq->vring.used = TARGET_PAGE_ALIGN(vq->vring.used);
+}
+
+/* Field accessors for the descriptor table; each reads one VRingDesc
+ * field straight out of guest-physical memory. */
+static uint64_t vring_desc_addr(VirtQueue *vq, unsigned int i)
+{
+ return ldq_phys(vq->vring.desc + i * sizeof(VRingDesc) +
+ offsetof(VRingDesc, addr));
+}
+
+static uint32_t vring_desc_len(VirtQueue *vq, unsigned int i)
+{
+ return ldl_phys(vq->vring.desc + i * sizeof(VRingDesc) +
+ offsetof(VRingDesc, len));
+}
+
+static uint16_t vring_desc_flags(VirtQueue *vq, unsigned int i)
+{
+ return lduw_phys(vq->vring.desc + i * sizeof(VRingDesc) +
+ offsetof(VRingDesc, flags));
+}
+
+static uint16_t vring_desc_next(VirtQueue *vq, unsigned int i)
+{
+ return lduw_phys(vq->vring.desc + i * sizeof(VRingDesc) +
+ offsetof(VRingDesc, next));
+}
+
+/* Accessors for the guest-owned avail ring. */
+static uint16_t vring_avail_flags(VirtQueue *vq)
+{
+ return lduw_phys(vq->vring.avail + offsetof(VRingAvail, flags));
+}
+
+static uint16_t vring_avail_idx(VirtQueue *vq)
+{
+ return lduw_phys(vq->vring.avail + offsetof(VRingAvail, idx));
+}
+
+static uint16_t vring_avail_ring(VirtQueue *vq, unsigned int i)
+{
+ return lduw_phys(vq->vring.avail + offsetof(VRingAvail, ring[i]));
+}
+
+/* Accessors for the host-owned used ring; flag updates are a
+ * read-modify-write of the 16-bit flags word in guest memory. */
+static void vring_used_set_flag(VirtQueue *vq, uint16_t flag)
+{
+ stw_phys(vq->vring.used + offsetof(VRingUsed, flags),
+ lduw_phys(vq->vring.used + offsetof(VRingUsed, flags)) | flag);
+}
+
+static void vring_used_unset_flag(VirtQueue *vq, uint16_t flag)
+{
+ stw_phys(vq->vring.used + offsetof(VRingUsed, flags),
+ lduw_phys(vq->vring.used + offsetof(VRingUsed, flags)) & ~flag);
+}
+
+static uint16_t vring_used_get_idx(VirtQueue *vq)
+{
+ return lduw_phys(vq->vring.used + offsetof(VRingUsed, idx));
+}
+
+static void vring_used_set_idx(VirtQueue *vq, uint16_t value)
+{
+ stw_phys(vq->vring.used + offsetof(VRingUsed, idx), value);
+}
+
+/* Publish one completed element (head index id, bytes written len)
+ * into used->ring[i]. */
+static void vring_used_set_ring(VirtQueue *vq, unsigned int i,
+ uint32_t id, uint32_t len)
+{
+ stl_phys(vq->vring.used + offsetof(VRingUsed, ring[i].id), id);
+ stl_phys(vq->vring.used + offsetof(VRingUsed, ring[i].len), len);
+}
+
+/* Walk a descriptor chain: returns the index of the next descriptor,
+ * or vq->vring.num (an out-of-range sentinel) at end of chain.
+ * A next index past the ring is fatal. */
+static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned int i)
+{
+ unsigned int next;
+
+ /* If this descriptor says it doesn't chain, we're done. */
+ if (!(vring_desc_flags(vq, i) & VRING_DESC_F_NEXT))
+ return vq->vring.num;
+
+ /* Check they're not leading us off end of descriptors. */
+ next = vring_desc_next(vq, i);
+ /* Make sure compiler knows to grab that: we don't want it changing! */
+ wmb();
+
+ if (next >= vq->vring.num)
+ errx(1, "Desc next is %u", next);
+
+ return next;
+}
+
+/* Return a completed element to the guest: unmap both DMA mappings,
+ * publish (index, len) in the used ring, bump used->idx (with a write
+ * barrier between the two so the guest never sees an idx covering an
+ * unwritten entry), and free the host-side bookkeeping.  len is the
+ * number of bytes written to the element's in (device-writable)
+ * buffers.  (The second unmap call had been corrupted by a mail
+ * line-wrap; the trailing "0);" is rejoined onto its statement.) */
+void virtqueue_push(VirtQueue *vq, VirtQueueElement *elem, unsigned int len)
+{
+    uint16_t idx;
+
+    pci_device_dma_unmap(&vq->vdev->pci_dev, elem->phys_in, elem->virt_in, 1);
+    pci_device_dma_unmap(&vq->vdev->pci_dev, elem->phys_out, elem->virt_out, 0);
+
+    idx = vring_used_get_idx(vq);
+    vring_used_set_ring(vq, idx % vq->vring.num, elem->index, len);
+    wmb();
+    vring_used_set_idx(vq, idx + 1);
+
+    qemu_free(elem->phys_in);
+    qemu_free(elem->phys_out);
+    qemu_free(elem);
+}
+
+/* Pop the next available element off the queue: read its descriptor
+ * chain, split it into device-readable (out) and device-writable (in)
+ * scatter lists, DMA-map both, and return it.  Returns NULL when the
+ * queue is empty; a malformed ring is fatal (errx).  The caller must
+ * hand the element back via virtqueue_push(), which frees it.
+ * (The unused local `position` from the original has been removed.) */
+VirtQueueElement *virtqueue_pop(VirtQueue *vq)
+{
+    unsigned int i, head;
+    VirtQueueElement *elem;
+
+    /* Check it isn't doing very strange things with descriptor numbers. */
+    if ((uint16_t)(vring_avail_idx(vq) - vq->last_avail_idx) > vq->vring.num)
+        errx(1, "Guest moved used index from %u to %u",
+             vq->last_avail_idx, vring_avail_idx(vq));
+
+    /* If there's nothing new since last we looked, return invalid. */
+    if (vring_avail_idx(vq) == vq->last_avail_idx)
+        return NULL;
+
+    /* Grab the next descriptor number they're advertising, and increment
+     * the index we've seen. */
+    head = vring_avail_ring(vq, vq->last_avail_idx++ % vq->vring.num);
+
+    /* If their number is silly, that's a fatal mistake. */
+    if (head >= vq->vring.num)
+        errx(1, "Guest says index %u is available", head);
+
+    elem = qemu_mallocz(sizeof(VirtQueueElement));
+
+    /* Worst case every descriptor lands in one direction, so size both
+     * vectors for the full ring.  When we start there are none of
+     * either input nor output. */
+    elem->phys_in = iovector_new(vq->vring.num);
+    elem->phys_out = iovector_new(vq->vring.num);
+
+    elem->phys_in->num = elem->phys_out->num = 0;
+
+    i = head;
+    do {
+        IOVectorElement *sge;
+
+        if (vring_desc_flags(vq, i) & VRING_DESC_F_WRITE)
+            sge = &elem->phys_in->sg[elem->phys_in->num++];
+        else
+            sge = &elem->phys_out->sg[elem->phys_out->num++];
+
+        /* Grab the first descriptor, and check it's OK. */
+        sge->len = vring_desc_len(vq, i);
+        sge->base = vring_desc_addr(vq, i);
+
+        /* If we've got too many, that implies a descriptor loop. */
+        if ((elem->phys_in->num + elem->phys_out->num) > vq->vring.num)
+            errx(1, "Looped descriptor");
+    } while ((i = virtqueue_next_desc(vq, i)) != vq->vring.num);
+
+    elem->virt_in = pci_device_dma_map(&vq->vdev->pci_dev, elem->phys_in);
+    elem->virt_out = pci_device_dma_map(&vq->vdev->pci_dev, elem->phys_out);
+    elem->index = head;
+
+    if (elem->virt_in == NULL || elem->virt_out == NULL)
+        errx(1, "Bad DMA");
+
+    return elem;
+}
+
+/* virtio device */
+
+/* VirtIODevice embeds its PCIDevice as the first member, so the
+ * PCIDevice pointer can simply be cast back. */
+static VirtIODevice *to_virtio_device(PCIDevice *pci_dev)
+{
+ return (VirtIODevice *)pci_dev;
+}
+
+/* Drive the PCI interrupt line to track bit 0 of the ISR. */
+static void virtio_update_irq(VirtIODevice *vdev)
+{
+ qemu_set_irq(vdev->pci_dev.irq[0], vdev->isr & 1);
+}
+
+/* Reset all device and ring state.  Registered as a machine reset
+ * handler, and also invoked when the guest writes 0 to the status
+ * register or to a queue PFN.  Queue sizes (vring.num) are left
+ * intact since they are fixed at virtio_add_queue() time. */
+void virtio_reset(void *opaque)
+{
+ VirtIODevice *vdev = opaque;
+ int i;
+
+ vdev->features = 0;
+ vdev->queue_sel = 0;
+ vdev->status = 0;
+ vdev->isr = 0;
+
+ for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+ vdev->vq[i].vring.desc = 0;
+ vdev->vq[i].vring.avail = 0;
+ vdev->vq[i].vring.used = 0;
+ vdev->vq[i].last_avail_idx = 0;
+ vdev->vq[i].pfn = 0;
+ }
+}
+
+/* Guest write to the virtio register window (I/O BAR).  addr arrives
+ * as an absolute port number, so convert it to a register offset. */
+static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
+{
+ VirtIODevice *vdev = to_virtio_device(opaque);
+ ram_addr_t pa;
+
+ addr -= vdev->addr;
+
+ switch (addr) {
+ case VIRTIO_PCI_GUEST_FEATURES:
+ if (vdev->set_features)
+ vdev->set_features(vdev, val);
+ vdev->features = val;
+ break;
+ case VIRTIO_PCI_QUEUE_PFN:
+ pa = (ram_addr_t)val << TARGET_PAGE_BITS;
+ vdev->vq[vdev->queue_sel].pfn = val;
+ /* Writing PFN 0 is the guest's way of requesting a reset. */
+ if (pa == 0)
+ virtio_reset(vdev);
+ else
+ virtqueue_init(&vdev->vq[vdev->queue_sel], pa);
+ break;
+ case VIRTIO_PCI_QUEUE_SEL:
+ if (val < VIRTIO_PCI_QUEUE_MAX)
+ vdev->queue_sel = val;
+ break;
+ case VIRTIO_PCI_QUEUE_NOTIFY:
+ /* Only kick queues that have been set up (desc != 0). */
+ if (val < VIRTIO_PCI_QUEUE_MAX && vdev->vq[val].vring.desc)
+ vdev->vq[val].handle_output(vdev, &vdev->vq[val]);
+ break;
+ case VIRTIO_PCI_STATUS:
+ vdev->status = val & 0xFF;
+ if (vdev->status == 0)
+ virtio_reset(vdev);
+ break;
+ }
+}
+
+/* Guest read from the virtio register window; unknown offsets return
+ * all-ones. */
+static uint32_t virtio_ioport_read(void *opaque, uint32_t addr)
+{
+ VirtIODevice *vdev = to_virtio_device(opaque);
+ uint32_t ret = 0xFFFFFFFF;
+
+ addr -= vdev->addr;
+
+ switch (addr) {
+ case VIRTIO_PCI_HOST_FEATURES:
+ ret = vdev->get_features(vdev);
+ break;
+ case VIRTIO_PCI_GUEST_FEATURES:
+ ret = vdev->features;
+ break;
+ case VIRTIO_PCI_QUEUE_PFN:
+ ret = vdev->vq[vdev->queue_sel].pfn;
+ break;
+ case VIRTIO_PCI_QUEUE_NUM:
+ ret = vdev->vq[vdev->queue_sel].vring.num;
+ break;
+ case VIRTIO_PCI_QUEUE_SEL:
+ ret = vdev->queue_sel;
+ break;
+ case VIRTIO_PCI_STATUS:
+ ret = vdev->status;
+ break;
+ case VIRTIO_PCI_ISR:
+ /* reading from the ISR also clears it. */
+ ret = vdev->isr;
+ vdev->isr = 0;
+ virtio_update_irq(vdev);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* Byte/word/long accessors for the device-specific config space that
+ * follows the common virtio registers at offset VIRTIO_PCI_CONFIG.
+ * NOTE(review): the bounds test `addr > (config_len - sizeof(val))`
+ * underflows (size_t wraps) when config_len < sizeof(val), letting an
+ * out-of-range addr pass for the 2- and 4-byte variants on a device
+ * with a 0- or 1-byte config area — verify config_len is always large
+ * enough, or compare `addr + sizeof(val) > config_len` instead. */
+static uint32_t virtio_config_readb(void *opaque, uint32_t addr)
+{
+ VirtIODevice *vdev = opaque;
+ uint8_t val;
+
+ addr -= vdev->addr + VIRTIO_PCI_CONFIG;
+ if (addr > (vdev->config_len - sizeof(val)))
+ return (uint32_t)-1;
+
+ memcpy(&val, vdev->config + addr, sizeof(val));
+ return val;
+}
+
+static uint32_t virtio_config_readw(void *opaque, uint32_t addr)
+{
+ VirtIODevice *vdev = opaque;
+ uint16_t val;
+
+ addr -= vdev->addr + VIRTIO_PCI_CONFIG;
+ if (addr > (vdev->config_len - sizeof(val)))
+ return (uint32_t)-1;
+
+ memcpy(&val, vdev->config + addr, sizeof(val));
+ return val;
+}
+
+static uint32_t virtio_config_readl(void *opaque, uint32_t addr)
+{
+ VirtIODevice *vdev = opaque;
+ uint32_t val;
+
+ addr -= vdev->addr + VIRTIO_PCI_CONFIG;
+ if (addr > (vdev->config_len - sizeof(val)))
+ return (uint32_t)-1;
+
+ memcpy(&val, vdev->config + addr, sizeof(val));
+ return val;
+}
+
+/* Writes update the cached config and notify the device model via its
+ * set_config callback, if provided. */
+static void virtio_config_writeb(void *opaque, uint32_t addr, uint32_t data)
+{
+ VirtIODevice *vdev = opaque;
+ uint8_t val = data;
+
+ addr -= vdev->addr + VIRTIO_PCI_CONFIG;
+ if (addr > (vdev->config_len - sizeof(val)))
+ return;
+
+ memcpy(vdev->config + addr, &val, sizeof(val));
+
+ if (vdev->set_config)
+ vdev->set_config(vdev, vdev->config);
+}
+
+static void virtio_config_writew(void *opaque, uint32_t addr, uint32_t data)
+{
+ VirtIODevice *vdev = opaque;
+ uint16_t val = data;
+
+ addr -= vdev->addr + VIRTIO_PCI_CONFIG;
+ if (addr > (vdev->config_len - sizeof(val)))
+ return;
+
+ memcpy(vdev->config + addr, &val, sizeof(val));
+
+ if (vdev->set_config)
+ vdev->set_config(vdev, vdev->config);
+}
+
+static void virtio_config_writel(void *opaque, uint32_t addr, uint32_t data)
+{
+ VirtIODevice *vdev = opaque;
+ uint32_t val = data;
+
+ addr -= vdev->addr + VIRTIO_PCI_CONFIG;
+ if (addr > (vdev->config_len - sizeof(val)))
+ return;
+
+ memcpy(vdev->config + addr, &val, sizeof(val));
+
+ if (vdev->set_config)
+ vdev->set_config(vdev, vdev->config);
+}
+
+/* PCI BAR map callback: register I/O handlers for the 20-byte common
+ * register block (at 1-, 2- and 4-byte widths via the 1 << i loop) and,
+ * if the device has config space, for the config window that follows,
+ * then prime the config cache from the device model. */
+static void virtio_map(PCIDevice *pci_dev, int region_num,
+ uint32_t addr, uint32_t size, int type)
+{
+ VirtIODevice *vdev = to_virtio_device(pci_dev);
+ int i;
+
+ vdev->addr = addr;
+ for (i = 0; i < 3; i++) {
+ register_ioport_write(addr, 20, 1 << i, virtio_ioport_write, vdev);
+ register_ioport_read(addr, 20, 1 << i, virtio_ioport_read, vdev);
+ }
+
+ if (vdev->config_len) {
+ register_ioport_write(addr + 20, vdev->config_len, 1,
+ virtio_config_writeb, vdev);
+ register_ioport_write(addr + 20, vdev->config_len, 2,
+ virtio_config_writew, vdev);
+ register_ioport_write(addr + 20, vdev->config_len, 4,
+ virtio_config_writel, vdev);
+ register_ioport_read(addr + 20, vdev->config_len, 1,
+ virtio_config_readb, vdev);
+ register_ioport_read(addr + 20, vdev->config_len, 2,
+ virtio_config_readw, vdev);
+ register_ioport_read(addr + 20, vdev->config_len, 4,
+ virtio_config_readl, vdev);
+
+ vdev->get_config(vdev, vdev->config);
+ }
+}
+
+/* Allocate the next free virtqueue slot (vring.num == 0 marks free)
+ * and attach the device's output handler.  Aborts if all
+ * VIRTIO_PCI_QUEUE_MAX slots are taken. */
+VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
+ void (*handle_output)(VirtIODevice *, VirtQueue *))
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+ if (vdev->vq[i].vring.num == 0)
+ break;
+ }
+
+ if (i == VIRTIO_PCI_QUEUE_MAX)
+ abort();
+
+ vdev->vq[i].vring.num = queue_size;
+ vdev->vq[i].handle_output = handle_output;
+ vdev->vq[i].index = i;
+ vdev->vq[i].vdev = vdev;
+
+ return &vdev->vq[i];
+}
+
+/* Signal a config-space change: refresh the cache and raise the
+ * interrupt with the config bit (bit 1) set as well. */
+void virtio_notify_config(VirtIODevice *vdev)
+{
+ /* make sure we have the latest config */
+ vdev->get_config(vdev, vdev->config);
+ vdev->isr = 3;
+ virtio_update_irq(vdev);
+}
+
+/* Interrupt the guest about queue activity, unless the ring is
+ * non-empty and the guest asked for interrupt suppression. */
+void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
+{
+ /* Always notify when queue is empty */
+ if (vring_avail_idx(vq) != vq->last_avail_idx &&
+ (vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT))
+ return;
+
+ vdev->isr = 1;
+ virtio_update_irq(vdev);
+}
+
+/* Set or clear the used-ring flag asking the guest not to kick us. */
+void virtio_ring_set_used_no_notify(VirtQueue *vq, int enable)
+{
+ if (enable)
+ vring_used_set_flag(vq, VRING_USED_F_NO_NOTIFY);
+ else
+ vring_used_unset_flag(vq, VRING_USED_F_NO_NOTIFY);
+}
+
+/* Number of avail ring entries not yet consumed by the host. */
+size_t virtio_ring_avail_size(VirtQueue *vq)
+{
+ return vring_avail_idx(vq) - vq->last_avail_idx;
+}
+
+/* Non-zero once the guest has programmed this queue's PFN. */
+int virtio_ring_inited(VirtQueue *vq)
+{
+ return (vq->vring.avail != 0);
+}
+
+/* Register a virtio PCI device: allocate the (device-specific)
+ * VirtIODevice of struct_size bytes, fill in the PCI config header
+ * fields by hand, and claim one I/O BAR covering the 20 common
+ * register bytes plus config_size, rounded up to a power of two as
+ * PCI BAR sizing requires. */
+VirtIODevice *virtio_init_pci(PCIBus *bus, const char *name,
+ uint16_t vendor, uint16_t device,
+ uint16_t subvendor, uint16_t subdevice,
+ uint8_t class_code, uint8_t subclass_code,
+ uint8_t pif, size_t config_size,
+ size_t struct_size)
+{
+ VirtIODevice *vdev;
+ PCIDevice *pci_dev;
+ uint8_t *config;
+ uint32_t size;
+
+ pci_dev = pci_register_device(bus, name, struct_size,
+ -1, NULL, NULL);
+ vdev = to_virtio_device(pci_dev);
+
+ vdev->status = 0;
+ vdev->isr = 0;
+ vdev->queue_sel = 0;
+ vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
+
+ config = pci_dev->config;
+ /* Vendor and device IDs, little-endian. */
+ config[0x00] = vendor & 0xFF;
+ config[0x01] = (vendor >> 8) & 0xFF;
+ config[0x02] = device & 0xFF;
+ config[0x03] = (device >> 8) & 0xFF;
+
+ /* Revision ID doubles as the virtio ABI version. */
+ config[0x08] = VIRTIO_PCI_ABI_VERSION;
+
+ config[0x09] = pif;
+ config[0x0a] = subclass_code;
+ config[0x0b] = class_code;
+ config[0x0e] = 0x00;
+
+ /* Subsystem vendor/device IDs identify the virtio device type. */
+ config[0x2c] = subvendor & 0xFF;
+ config[0x2d] = (subvendor >> 8) & 0xFF;
+ config[0x2e] = subdevice & 0xFF;
+ config[0x2f] = (subdevice >> 8) & 0xFF;
+
+ /* Interrupt pin: INTA#. */
+ config[0x3d] = 1;
+
+ vdev->name = name;
+ vdev->config_len = config_size;
+ if (vdev->config_len)
+ vdev->config = qemu_mallocz(config_size);
+ else
+ vdev->config = NULL;
+
+ /* Round the BAR size up to the next power of two if needed. */
+ size = 20 + config_size;
+ if (size & (size-1))
+ size = 1 << fls(size);
+
+ pci_register_io_region(pci_dev, 0, size, PCI_ADDRESS_SPACE_IO,
+ virtio_map);
+ qemu_register_reset(virtio_reset, vdev);
+
+ return vdev;
+}
diff --git a/hw/virtio.h b/hw/virtio.h
new file mode 100644
index 0000000..25c3b62
--- /dev/null
+++ b/hw/virtio.h
@@ -0,0 +1,89 @@
+/*
+ * Virtio Support
+ *
+ * Copyright IBM, Corp. 2007-2008
+ *
+ * Authors:
+ * Anthony Liguori <address@hidden>
+ * Rusty Russell <address@hidden>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+/* Guard renamed from _QEMU_VIRTIO_H: identifiers beginning with an
+ * underscore followed by an uppercase letter are reserved for the
+ * implementation (C99 7.1.3). */
+#ifndef QEMU_VIRTIO_H
+#define QEMU_VIRTIO_H
+
+#include "hw.h"
+#include "pci.h"
+#include "iovector.h"
+
+/* from Linux's linux/virtio_config.h */
+
+/* Status byte for guest to report progress, and synchronize features. */
+/* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */
+#define VIRTIO_CONFIG_S_ACKNOWLEDGE 1
+/* We have found a driver for the device. */
+#define VIRTIO_CONFIG_S_DRIVER 2
+/* Driver has used its parts of the config, and is happy */
+#define VIRTIO_CONFIG_S_DRIVER_OK 4
+/* We've given up on this device. */
+#define VIRTIO_CONFIG_S_FAILED 0x80
+
+typedef struct VirtQueue VirtQueue;
+typedef struct VirtIODevice VirtIODevice;
+
+typedef struct VirtQueueElement
+{
+ unsigned int index;
+ IOVector *virt_in, *virt_out;
+ IOVector *phys_in, *phys_out;
+} VirtQueueElement;
+
+struct VirtIODevice
+{
+ PCIDevice pci_dev;
+ const char *name;
+ uint32_t addr;
+ uint16_t vendor;
+ uint16_t device;
+ uint8_t status;
+ uint8_t isr;
+ uint16_t queue_sel;
+ uint32_t features;
+ size_t config_len;
+ void *config;
+ uint32_t (*get_features)(VirtIODevice *vdev);
+ void (*set_features)(VirtIODevice *vdev, uint32_t val);
+ void (*get_config)(VirtIODevice *vdev, uint8_t *config);
+ void (*set_config)(VirtIODevice *vdev, const uint8_t *config);
+ VirtQueue *vq;
+};
+
+VirtIODevice *virtio_init_pci(PCIBus *bus, const char *name,
+ uint16_t vendor, uint16_t device,
+ uint16_t subvendor, uint16_t subdevice,
+ uint8_t class_code, uint8_t subclass_code,
+ uint8_t pif, size_t config_size,
+ size_t struct_size);
+
+VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
+ void (*handle_output)(VirtIODevice *,
+ VirtQueue *));
+
+void virtqueue_push(VirtQueue *vq, VirtQueueElement *elem, unsigned int len);
+
+VirtQueueElement *virtqueue_pop(VirtQueue *vq);
+
+void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);
+
+void virtio_ring_set_used_no_notify(VirtQueue *vq, int enable);
+
+size_t virtio_ring_avail_size(VirtQueue *vq);
+
+int virtio_ring_inited(VirtQueue *vq);
+
+void virtio_notify_config(VirtIODevice *vdev);
+
+#endif
diff --git a/qemu-common.h b/qemu-common.h
index 746dcc5..cd387b1 100644
--- a/qemu-common.h
+++ b/qemu-common.h
@@ -85,6 +85,7 @@ char *pstrcat(char *buf, int buf_size, const char *s);
int strstart(const char *str, const char *val, const char **ptr);
int stristart(const char *str, const char *val, const char **ptr);
time_t mktimegm(struct tm *tm);
+int fls(int i);
/* Error handling. */
- [Qemu-devel] [PATCH 1/6] Use ram_addr_t for cpu_get_physical_page_desc (v2), Anthony Liguori, 2008/04/05
- Re: [Qemu-devel] [PATCH 2/6] PCI DMA API (v2), Blue Swirl, 2008/04/06
- Re: [kvm-devel] [Qemu-devel] [PATCH 2/6] PCI DMA API (v2), Anthony Liguori, 2008/04/06
- Re: [kvm-devel] [Qemu-devel] [PATCH 2/6] PCI DMA API (v2), andrzej zaborowski, 2008/04/06
- Re: [kvm-devel] [Qemu-devel] [PATCH 2/6] PCI DMA API (v2), Anthony Liguori, 2008/04/06
- Re: [kvm-devel] [Qemu-devel] [PATCH 2/6] PCI DMA API (v2), Paul Brook, 2008/04/06
- Re: [kvm-devel] [Qemu-devel] [PATCH 2/6] PCI DMA API (v2), andrzej zaborowski, 2008/04/07
Re: [Qemu-devel] [PATCH 2/6] PCI DMA API (v2), Paul Brook, 2008/04/07