qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [RFC PATCH] ppc/xics: introduce helpers to find an ICP from


From: Cédric Le Goater
Subject: [Qemu-devel] [RFC PATCH] ppc/xics: introduce helpers to find an ICP from some (CPU) index
Date: Mon, 19 Sep 2016 10:54:10 +0200

Today, the CPU index is used to index the icp array under xics. This
works correctly when the indexes are in sync, but that is an assumption
which could break in future implementations. For instance, the PowerNV
platform CPUs use real HW ids, and these are not contiguous.

Let's introduce some helpers to hide the underlying structure of the
ICP array. The criterion used to look up an ICPState is still the CPU
index, but it is now decoupled from the array index.

This is an RFC to gather feedback. It is used on the powernv branch,
but it won't apply as-is on upstream. The lookup should certainly be
optimised when a large number of CPUs are configured.

Signed-off-by: Cédric Le Goater <address@hidden>
---
 hw/intc/xics.c        |   59 ++++++++++++++++++++++++++++++++++++++++----------
 hw/intc/xics_kvm.c    |    7 +----
 hw/intc/xics_spapr.c  |   14 ++++++-----
 include/hw/ppc/xics.h |    1 
 4 files changed, 59 insertions(+), 22 deletions(-)

Index: qemu-dgibson-for-2.8.git/hw/intc/xics.c
===================================================================
--- qemu-dgibson-for-2.8.git.orig/hw/intc/xics.c
+++ qemu-dgibson-for-2.8.git/hw/intc/xics.c
@@ -50,12 +50,41 @@ int xics_get_cpu_index_by_dt_id(int cpu_
     return -1;
 }
 
+
+static ICPState *xics_get_icp(XICSState *xics, CPUState *cs)
+{
+    int i;
+
+    for (i = 0 ; i < xics->nr_servers; i++) {
+        ICPState *ss = &xics->ss[i];
+        if (ss->cs == NULL) {
+            ss->cs = cs;
+            return ss;
+        }
+    }
+
+    return NULL;
+}
+
+ICPState *xics_find_icp(XICSState *xics, int cpu_index)
+{
+    int i;
+
+    for (i = 0 ; i < xics->nr_servers; i++) {
+        ICPState *ss = &xics->ss[i];
+        if (ss->cs && ss->cs->cpu_index == cpu_index)
+            return ss;
+    }
+
+    return NULL;
+}
+
 void xics_cpu_destroy(XICSState *xics, PowerPCCPU *cpu)
 {
     CPUState *cs = CPU(cpu);
-    ICPState *ss = &xics->ss[cs->cpu_index];
+    ICPState *ss = xics_find_icp(xics, cs->cpu_index);
 
-    assert(cs->cpu_index < xics->nr_servers);
+    assert(ss);
     assert(cs == ss->cs);
 
     ss->output = NULL;
@@ -66,12 +95,10 @@ void xics_cpu_setup(XICSState *xics, Pow
 {
     CPUState *cs = CPU(cpu);
     CPUPPCState *env = &cpu->env;
-    ICPState *ss = &xics->ss[cs->cpu_index];
+    ICPState *ss = xics_get_icp(xics, cs);
     XICSStateClass *info = XICS_COMMON_GET_CLASS(xics);
 
-    assert(cs->cpu_index < xics->nr_servers);
-
-    ss->cs = cs;
+    assert(ss);
 
     if (info->cpu_setup) {
         info->cpu_setup(xics, cpu);
@@ -276,9 +303,11 @@ static void icp_check_ipi(ICPState *ss)
 
 static void icp_resend(XICSState *xics, int server)
 {
-    ICPState *ss = xics->ss + server;
+    ICPState *ss = xics_find_icp(xics, server);
     ICSState *ics;
 
+    assert(ss);
+
     if (ss->mfrr < CPPR(ss)) {
         icp_check_ipi(ss);
     }
@@ -289,10 +318,12 @@ static void icp_resend(XICSState *xics,
 
 void icp_set_cppr(XICSState *xics, int server, uint8_t cppr)
 {
-    ICPState *ss = xics->ss + server;
+    ICPState *ss = xics_find_icp(xics, server);
     uint8_t old_cppr;
     uint32_t old_xisr;
 
+    assert(ss);
+
     old_cppr = CPPR(ss);
     ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);
 
@@ -316,7 +347,9 @@ void icp_set_cppr(XICSState *xics, int s
 
 void icp_set_mfrr(XICSState *xics, int server, uint8_t mfrr)
 {
-    ICPState *ss = xics->ss + server;
+    ICPState *ss = xics_find_icp(xics, server);
+
+    assert(ss);
 
     ss->mfrr = mfrr;
     if (mfrr < CPPR(ss)) {
@@ -348,10 +381,12 @@ uint32_t icp_ipoll(ICPState *ss, uint32_
 
 void icp_eoi(XICSState *xics, int server, uint32_t xirr)
 {
-    ICPState *ss = xics->ss + server;
+    ICPState *ss = xics_find_icp(xics, server);
     ICSState *ics;
     uint32_t irq;
 
+    assert(ss);
+
     /* Send EOI -> ICS */
     ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
     trace_xics_icp_eoi(server, xirr, ss->xirr);
@@ -369,7 +404,9 @@ void icp_eoi(XICSState *xics, int server
 static void icp_irq(ICSState *ics, int server, int nr, uint8_t priority)
 {
     XICSState *xics = ics->xics;
-    ICPState *ss = xics->ss + server;
+    ICPState *ss = xics_find_icp(xics, server);
+
+    assert(ss);
 
     trace_xics_icp_irq(server, nr, priority);
 
Index: qemu-dgibson-for-2.8.git/include/hw/ppc/xics.h
===================================================================
--- qemu-dgibson-for-2.8.git.orig/include/hw/ppc/xics.h
+++ qemu-dgibson-for-2.8.git/include/hw/ppc/xics.h
@@ -207,5 +207,6 @@ ICSState *xics_find_source(XICSState *ic
 void xics_add_ics(XICSState *xics);
 
 void xics_hmp_info_pic(Monitor *mon, const QDict *qdict);
+ICPState *xics_find_icp(XICSState *xics, int cpu_index);
 
 #endif /* XICS_H */
Index: qemu-dgibson-for-2.8.git/hw/intc/xics_spapr.c
===================================================================
--- qemu-dgibson-for-2.8.git.orig/hw/intc/xics_spapr.c
+++ qemu-dgibson-for-2.8.git/hw/intc/xics_spapr.c
@@ -67,9 +67,11 @@ static target_ulong h_xirr(PowerPCCPU *c
                            target_ulong opcode, target_ulong *args)
 {
     CPUState *cs = CPU(cpu);
-    uint32_t xirr = icp_accept(spapr->xics->ss + cs->cpu_index);
+    ICPState *ss = xics_find_icp(spapr->xics, cs->cpu_index);
 
-    args[0] = xirr;
+    assert(ss);
+
+    args[0] = icp_accept(ss);
     return H_SUCCESS;
 }
 
@@ -77,10 +79,9 @@ static target_ulong h_xirr_x(PowerPCCPU
                              target_ulong opcode, target_ulong *args)
 {
     CPUState *cs = CPU(cpu);
-    ICPState *ss = &spapr->xics->ss[cs->cpu_index];
-    uint32_t xirr = icp_accept(ss);
+    ICPState *ss = xics_find_icp(spapr->xics, cs->cpu_index);
 
-    args[0] = xirr;
+    args[0] = icp_accept(ss);
     args[1] = cpu_get_host_ticks();
     return H_SUCCESS;
 }
@@ -99,8 +100,9 @@ static target_ulong h_ipoll(PowerPCCPU *
                             target_ulong opcode, target_ulong *args)
 {
     CPUState *cs = CPU(cpu);
+    ICPState *ss = xics_find_icp(spapr->xics, cs->cpu_index);
     uint32_t mfrr;
-    uint32_t xirr = icp_ipoll(spapr->xics->ss + cs->cpu_index, &mfrr);
+    uint32_t xirr = icp_ipoll(ss, &mfrr);
 
     args[0] = xirr;
     args[1] = mfrr;
Index: qemu-dgibson-for-2.8.git/hw/intc/xics_kvm.c
===================================================================
--- qemu-dgibson-for-2.8.git.orig/hw/intc/xics_kvm.c
+++ qemu-dgibson-for-2.8.git/hw/intc/xics_kvm.c
@@ -326,14 +326,11 @@ static const TypeInfo ics_kvm_info = {
  */
 static void xics_kvm_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
 {
-    CPUState *cs;
-    ICPState *ss;
+    CPUState *cs = CPU(cpu);
+    ICPState *ss = xics_find_icp(xics, cs->cpu_index);
     KVMXICSState *xicskvm = XICS_SPAPR_KVM(xics);
     int ret;
 
-    cs = CPU(cpu);
-    ss = &xics->ss[cs->cpu_index];
-
     assert(cs->cpu_index < xics->nr_servers);
     if (xicskvm->kernel_xics_fd == -1) {
         abort();



reply via email to

[Prev in Thread] Current Thread [Next in Thread]