From: Cédric Le Goater
Subject: [Qemu-devel] [PATCH v4 14/28] spapr/xive: use the VCPU id as a VP identifier in the OS CAM.
Date: Thu, 7 Jun 2018 17:49:49 +0200

For the IVPE to find a matching VP among the VPs dispatched on the
physical processor threads, the model needs to update the OS CAM line
of the XIVE thread interrupt context with the VP identifier.

The model uses the VCPU id as the VP identifier for the sPAPR machine
and provides a set of helpers to convert between the two identifiers.
EQ identifiers are also derived from the VCPU id.
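
For illustration, the mapping boils down to the following (the numbers
are a worked example, not taken from the patch):

    vp_idx = vcpu_id                    /* vCPU 2         -> VP 2  */
    eq_idx = (vcpu_id << 3) + prio      /* vCPU 2, prio 5 -> EQ 21 */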

sPAPRXive does not provision storage for a VPD table, but the
XiveRouter handlers still give the routing algorithm an extra validity
check: a VP is reported valid only if a matching vCPU exists.
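
As a rough sketch of how the routing code can consume this (the
xive_router_get_vp() wrapper used below is assumed here, it is not
part of this patch), the end-of-route check amounts to:

    XiveVP vp;

    /* fetch the (synthetic) VP descriptor; sPAPR only reports validity */
    if (xive_router_get_vp(xrtr, vp_blk, vp_idx, &vp)) {
        return;                     /* no matching vCPU, drop the event */
    }
    if (!(vp.w0 & VP_W0_VALID)) {
        return;                     /* VP is not valid, drop the event  */
    }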

Signed-off-by: Cédric Le Goater <address@hidden>
---
 include/hw/ppc/spapr_xive.h | 15 +++++++++
 hw/intc/spapr_xive.c        | 80 +++++++++++++++++++++++++++++++++++++++++++++
 hw/intc/xive.c              | 12 +++++++
 3 files changed, 107 insertions(+)

diff --git a/include/hw/ppc/spapr_xive.h b/include/hw/ppc/spapr_xive.h
index 32733270f734..be1163a8f272 100644
--- a/include/hw/ppc/spapr_xive.h
+++ b/include/hw/ppc/spapr_xive.h
@@ -43,4 +43,19 @@ bool spapr_xive_irq_disable(sPAPRXive *xive, uint32_t lisn);
 void spapr_xive_pic_print_info(sPAPRXive *xive, Monitor *mon);
 qemu_irq spapr_xive_qirq(sPAPRXive *xive, uint32_t lisn);
 
+/*
+ * sPAPR VP and EQ indexing helpers
+ */
+static inline uint32_t spapr_xive_vp_to_target(sPAPRXive *xive, uint8_t vp_blk,
+                                               uint32_t vp_idx)
+{
+    return vp_idx;
+}
+int spapr_xive_target_to_vp(XiveRouter *xrtr, uint32_t target,
+                            uint8_t *out_vp_blk, uint32_t *out_vp_idx);
+int spapr_xive_target_to_eq(XiveRouter *xrtr, uint32_t target, uint8_t prio,
+                            uint8_t *out_eq_blk, uint32_t *out_eq_idx);
+int spapr_xive_cpu_to_eq(XiveRouter *xrtr, PowerPCCPU *cpu, uint8_t prio,
+                         uint8_t *out_eq_blk, uint32_t *out_eq_idx);
+
 #endif /* PPC_SPAPR_XIVE_H */
diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
index e006c199ed11..222c1266a547 100644
--- a/hw/intc/spapr_xive.c
+++ b/hw/intc/spapr_xive.c
@@ -184,6 +184,84 @@ static int spapr_xive_set_eq(XiveRouter *xrtr,
     return 0;
 }
 
+static int spapr_xive_get_vp(XiveRouter *xrtr,
+                             uint8_t vp_blk, uint32_t vp_idx, XiveVP *vp)
+{
+    sPAPRXive *xive = SPAPR_XIVE(xrtr);
+    uint32_t vcpu_id = spapr_xive_vp_to_target(xive, vp_blk, vp_idx);
+    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);
+
+    if (!cpu) {
+        return -1;
+    }
+
+    /*
+     * sPAPR does not maintain a VPD table. Return that the VP is
+     * valid if we have found a matching CPU.
+     */
+    vp->w0 = VP_W0_VALID;
+    return 0;
+}
+
+static int spapr_xive_set_vp(XiveRouter *xrtr,
+                             uint8_t vp_blk, uint32_t vp_idx, XiveVP *vp)
+{
+    /* no VPD table */
+    return 0;
+}
+
+/*
+ * sPAPR VP indexing uses a simple mapping of the CPU vcpu_id
+ */
+int spapr_xive_target_to_vp(XiveRouter *xrtr, uint32_t target,
+                            uint8_t *out_vp_blk, uint32_t *out_vp_idx)
+{
+    PowerPCCPU *cpu = spapr_find_cpu(target);
+
+    if (!cpu) {
+        return -1;
+    }
+
+    if (out_vp_blk) {
+        *out_vp_blk = xrtr->chip_id;
+    }
+
+    if (out_vp_idx) {
+        *out_vp_idx = cpu->vcpu_id;
+    }
+    return 0;
+}
+
+/*
+ * sPAPR EQ indexing uses a simple mapping of the CPU vcpu_id, 8
+ * priorities per CPU
+ */
+int spapr_xive_cpu_to_eq(XiveRouter *xrtr, PowerPCCPU *cpu, uint8_t prio,
+                         uint8_t *out_eq_blk, uint32_t *out_eq_idx)
+{
+    if (!cpu) {
+        return -1;
+    }
+
+    if (out_eq_blk) {
+        *out_eq_blk = xrtr->chip_id;
+    }
+
+    if (out_eq_idx) {
+        *out_eq_idx = (cpu->vcpu_id << 3) + prio;
+    }
+    return 0;
+}
+
+int spapr_xive_target_to_eq(XiveRouter *xrtr, uint32_t target, uint8_t prio,
+                            uint8_t *out_eq_blk, uint32_t *out_eq_idx)
+{
+    PowerPCCPU *cpu = spapr_find_cpu(target);
+
+    return spapr_xive_cpu_to_eq(xrtr, cpu, prio, out_eq_blk, out_eq_idx);
+}
+
+
 static const VMStateDescription vmstate_spapr_xive_eq = {
     .name = TYPE_SPAPR_XIVE "/eq",
     .version_id = 1,
@@ -248,6 +248,8 @@ static void spapr_xive_class_init(ObjectClass *klass, void *data)
     xrc->set_ive = spapr_xive_set_ive;
     xrc->get_eq  = spapr_xive_get_eq;
     xrc->set_eq  = spapr_xive_set_eq;
+    xrc->get_vp  = spapr_xive_get_vp;
+    xrc->set_vp  = spapr_xive_set_vp;
 }
 
 static const TypeInfo spapr_xive_info = {
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index f249ffc8943e..671ea1c6c36b 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -486,6 +486,8 @@ static uint32_t xive_tctx_hw_cam(XiveTCTX *tctx, bool block_group)
 static void xive_tctx_reset(void *dev)
 {
     XiveTCTX *tctx = XIVE_TCTX(dev);
+    PowerPCCPU *cpu = POWERPC_CPU(tctx->cs);
+    CPUPPCState *env = &cpu->env;
 
     memset(tctx->regs, 0, sizeof(tctx->regs));
 
@@ -500,6 +502,16 @@ static void xive_tctx_reset(void *dev)
      */
     tctx->regs[TM_QW1_OS + TM_PIPR] =
         ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
+
+    /* The OS CAM is pushed by the hypervisor when the VP is scheduled
+     * to run on a HW thread. On QEMU, when running a pseries machine,
+     * hardwire the VCPU id as this is our VP identifier.
+     */
+    if (!msr_hv) {
+        uint32_t os_cam = cpu_to_be32(
+            TM_QW1W2_VO | tctx_cam_line(tctx->xrtr->chip_id, cpu->vcpu_id));
+        memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &os_cam, 4);
+    }
 }
 
 static void xive_tctx_realize(DeviceState *dev, Error **errp)
-- 
2.13.6