[Qemu-devel] [PATCH 23/27] Implement PAPR CRQ hypercalls
From: David Gibson
Subject: [Qemu-devel] [PATCH 23/27] Implement PAPR CRQ hypercalls
Date: Wed, 23 Mar 2011 16:30:43 +1100
This patch implements the infrastructure and hypercalls necessary for the
PAPR-specified CRQ (Command Request Queue) mechanism. This general
request queueing system is used by many of the PAPR virtual IO devices,
including the virtual SCSI adapter.
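
For illustration only (not part of the patch itself): the queue registered
through H_REG_CRQ is a ring of 16-byte slots in guest memory, and
spapr_vio_send_crq() below treats the first byte of a slot as an "in use"
marker. A rough sketch of that layout, with invented field names:

    /* Illustrative sketch -- field names are hypothetical, not from PAPR
     * or this patch.  spapr_vio_send_crq() only stores an entry when the
     * first byte of the next slot is 0, writes bytes 8..15 first, issues
     * a barrier, then writes bytes 0..7 (which carry the valid marker),
     * and finally advances qnext by 16 modulo qsize. */
    struct crq_entry {
        uint8_t header;      /* non-zero once the entry is valid */
        uint8_t rest[7];     /* remainder of the first doubleword */
        uint8_t payload[8];  /* second doubleword, written before the header */
    };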
Signed-off-by: Ben Herrenschmidt <address@hidden>
Signed-off-by: David Gibson <address@hidden>
---
hw/spapr.c | 2 +-
hw/spapr_vio.c | 160 ++++++++++++++++++++++++++++++++++++++++++++++++++
hw/spapr_vio.h | 12 ++++
target-ppc/kvm_ppc.h | 11 ++++
4 files changed, 184 insertions(+), 1 deletions(-)
diff --git a/hw/spapr.c b/hw/spapr.c
index 98ca5ac..3d161db 100644
--- a/hw/spapr.c
+++ b/hw/spapr.c
@@ -66,7 +66,7 @@ static void *spapr_create_fdt(int *fdt_size, ram_addr_t ramsize,
uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size);
uint32_t pft_size_prop[] = {0, cpu_to_be32(hash_shift)};
char hypertas_prop[] = "hcall-pft\0hcall-term\0hcall-dabr\0hcall-interrupt"
- "\0hcall-tce";
+ "\0hcall-tce\0hcall-vio";
uint32_t interrupt_server_ranges_prop[] = {0, cpu_to_be32(smp_cpus)};
int i;
char *modelname;
diff --git a/hw/spapr_vio.c b/hw/spapr_vio.c
index 39d77ee..8f14fcc 100644
--- a/hw/spapr_vio.c
+++ b/hw/spapr_vio.c
@@ -28,6 +28,7 @@
#include "hw/sysbus.h"
#include "kvm.h"
#include "device_tree.h"
+#include "kvm_ppc.h"
#include "hw/spapr.h"
#include "hw/spapr_vio.h"
@@ -359,6 +360,159 @@ uint64_t ldq_tce(VIOsPAPRDevice *dev, uint64_t taddr)
return tswap64(val);
}
+/*
+ * CRQ handling
+ */
+static target_ulong h_reg_crq(CPUState *env, sPAPREnvironment *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong reg = args[0];
+ target_ulong queue_addr = args[1];
+ target_ulong queue_len = args[2];
+ VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
+
+ if (!dev) {
+ hcall_dprintf("h_reg_crq on non-existent unit 0x"
+ TARGET_FMT_lx "\n", reg);
+ return H_PARAMETER;
+ }
+
+ /* We can't grok a queue size bigger than 256M for now */
+ if (queue_len < 0x1000 || queue_len > 0x10000000) {
+ hcall_dprintf("h_reg_crq, queue size too small or too big (0x%llx)\n",
+ (unsigned long long)queue_len);
+ return H_PARAMETER;
+ }
+
+ /* Check queue alignment */
+ if (queue_addr & 0xfff) {
+ hcall_dprintf("h_reg_crq, queue not aligned (0x%llx)\n",
+ (unsigned long long)queue_addr);
+ return H_PARAMETER;
+ }
+
+ /* Check if device supports CRQs */
+ if (!dev->crq.SendFunc) {
+ return H_NOT_FOUND;
+ }
+
+
+ /* Already a queue ? */
+ if (dev->crq.qsize) {
+ return H_RESOURCE;
+ }
+ dev->crq.qladdr = queue_addr;
+ dev->crq.qsize = queue_len;
+ dev->crq.qnext = 0;
+
+ dprintf("CRQ for dev 0x" TARGET_FMT_lx " registered at 0x"
+ TARGET_FMT_lx "/0x" TARGET_FMT_lx "\n",
+ reg, queue_addr, queue_len);
+ return H_SUCCESS;
+}
+
+static target_ulong h_free_crq(CPUState *env, sPAPREnvironment *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong reg = args[0];
+ VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
+
+ if (!dev) {
+ hcall_dprintf("h_free_crq on non-existent unit 0x"
+ TARGET_FMT_lx "\n", reg);
+ return H_PARAMETER;
+ }
+
+ dev->crq.qladdr = 0;
+ dev->crq.qsize = 0;
+ dev->crq.qnext = 0;
+
+ dprintf("CRQ for dev 0x" TARGET_FMT_lx " freed\n", reg);
+
+ return H_SUCCESS;
+}
+
+static target_ulong h_send_crq(CPUState *env, sPAPREnvironment *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong reg = args[0];
+ target_ulong msg_hi = args[1];
+ target_ulong msg_lo = args[2];
+ VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
+ uint64_t crq_mangle[2];
+
+ if (!dev) {
+ hcall_dprintf("h_send_crq on non-existent unit 0x"
+ TARGET_FMT_lx "\n", reg);
+ return H_PARAMETER;
+ }
+ crq_mangle[0] = cpu_to_be64(msg_hi);
+ crq_mangle[1] = cpu_to_be64(msg_lo);
+
+ if (dev->crq.SendFunc) {
+ return dev->crq.SendFunc(dev, (uint8_t *)crq_mangle);
+ }
+
+ return H_HARDWARE;
+}
+
+static target_ulong h_enable_crq(CPUState *env, sPAPREnvironment *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong reg = args[0];
+ VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
+
+ if (!dev) {
+ hcall_dprintf("h_enable_crq on non-existent unit 0x"
+ TARGET_FMT_lx "\n", reg);
+ return H_PARAMETER;
+ }
+
+ return H_SUCCESS;
+}
+
+/* Returns negative error, 0 success, or positive: queue full */
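+/* Each CRQ entry is 16 bytes; a non-zero first byte in the next slot means
+ * the guest has not consumed it yet, so the queue is treated as full. */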
+int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq)
+{
+ int rc;
+ uint8_t byte;
+
+ if (!dev->crq.qsize) {
+ fprintf(stderr, "spapr_vio_send_crq on uninitialized queue\n");
+ return -1;
+ }
+
+ /* Maybe do a fast path for KVM just writing to the pages */
+ rc = spapr_tce_dma_read(dev, dev->crq.qladdr + dev->crq.qnext, &byte, 1);
+ if (rc) {
+ return rc;
+ }
+ if (byte != 0) {
+ return 1;
+ }
+
+ rc = spapr_tce_dma_write(dev, dev->crq.qladdr + dev->crq.qnext + 8,
+ &crq[8], 8);
+ if (rc) {
+ return rc;
+ }
+
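+ /* Make sure the second half of the entry is visible in guest memory
+  * before the first half, which carries the valid marker, is written. */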
+ kvmppc_eieio();
+
+ rc = spapr_tce_dma_write(dev, dev->crq.qladdr + dev->crq.qnext, crq, 8);
+ if (rc) {
+ return rc;
+ }
+
+ dev->crq.qnext = (dev->crq.qnext + 16) % dev->crq.qsize;
+
+ if (dev->signal_state & 1) {
+ qemu_irq_pulse(dev->qirq);
+ }
+
+ return 0;
+}
+
static int spapr_vio_busdev_init(DeviceState *qdev, DeviceInfo *qinfo)
{
VIOsPAPRDeviceInfo *info = (VIOsPAPRDeviceInfo *)qinfo;
@@ -431,6 +585,12 @@ VIOsPAPRBus *spapr_vio_bus_init(void)
/* hcall-tce */
spapr_register_hypercall(H_PUT_TCE, h_put_tce);
+ /* hcall-crq */
+ spapr_register_hypercall(H_REG_CRQ, h_reg_crq);
+ spapr_register_hypercall(H_FREE_CRQ, h_free_crq);
+ spapr_register_hypercall(H_SEND_CRQ, h_send_crq);
+ spapr_register_hypercall(H_ENABLE_CRQ, h_enable_crq);
+
for (qinfo = device_info_list; qinfo; qinfo = qinfo->next) {
VIOsPAPRDeviceInfo *info = (VIOsPAPRDeviceInfo *)qinfo;
diff --git a/hw/spapr_vio.h b/hw/spapr_vio.h
index 4cfaf55..ba16795 100644
--- a/hw/spapr_vio.h
+++ b/hw/spapr_vio.h
@@ -32,10 +32,19 @@ enum VIOsPAPR_TCEAccess {
SPAPR_TCE_RW = 3,
};
+struct VIOsPAPRDevice;
+
typedef struct VIOsPAPR_RTCE {
uint64_t tce;
} VIOsPAPR_RTCE;
+typedef struct VIOsPAPR_CRQ {
+ uint64_t qladdr;
+ uint32_t qsize;
+ uint32_t qnext;
+ int(*SendFunc)(struct VIOsPAPRDevice *vdev, uint8_t *crq);
+} VIOsPAPR_CRQ;
+
typedef struct VIOsPAPRDevice {
DeviceState qdev;
uint32_t reg;
@@ -44,6 +53,7 @@ typedef struct VIOsPAPRDevice {
target_ulong signal_state;
uint32_t rtce_window_size;
VIOsPAPR_RTCE *rtce_table;
+ VIOsPAPR_CRQ crq;
} VIOsPAPRDevice;
typedef struct VIOsPAPRBus {
@@ -81,6 +91,8 @@ void stw_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint32_t val);
void stq_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint64_t val);
uint64_t ldq_tce(VIOsPAPRDevice *dev, uint64_t taddr);
+int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq);
+
void vty_putchars(VIOsPAPRDevice *sdev, uint8_t *buf, int len);
void spapr_vty_create(VIOsPAPRBus *bus,
uint32_t reg, CharDriverState *chardev,
diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
index 911b19e..5afb308 100644
--- a/target-ppc/kvm_ppc.h
+++ b/target-ppc/kvm_ppc.h
@@ -18,6 +18,17 @@ uint32_t kvmppc_get_tbfreq(void);
int kvmppc_get_hypercall(CPUState *env, uint8_t *buf, int buf_len);
int kvmppc_set_interrupt(CPUState *env, int irq, int level);
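+/* Order DMA stores to guest memory: emit a real eieio when running under
+ * KVM, where the guest executes on other host CPUs; otherwise a no-op. */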
+#ifndef CONFIG_KVM
+#define kvmppc_eieio() do { } while (0)
+#else
+#define kvmppc_eieio() \
+ do { \
+ if (kvm_enabled()) { \
+ asm volatile("eieio" : : : "memory"); \
+ } \
+ } while (0)
+#endif
+
#ifndef KVM_INTERRUPT_SET
#define KVM_INTERRUPT_SET -1
#endif
--
1.7.1