From: David Gibson
Subject: Re: [Qemu-devel] [PATCH v3 1/8] ppc/xics: introduce an 'icp' backlink under PowerPCCPU
Date: Thu, 30 Mar 2017 12:26:16 +1100
User-agent: Mutt/1.8.0 (2017-02-23)

On Wed, Mar 29, 2017 at 09:14:01AM +0200, Cédric Le Goater wrote:
> On 03/29/2017 06:11 AM, David Gibson wrote:
> > On Tue, Mar 28, 2017 at 09:32:25AM +0200, Cédric Le Goater wrote:
> >> Today, the ICPState array of the sPAPR machine is indexed with the
> >> 'cpu_index' of the CPUState. This numbering of CPUs is internal to
> >> QEMU, and the guest only knows about what is exposed in the device
> >> tree, that is, the 'cpu_dt_id'. This is why sPAPR uses the helper
> >> xics_get_cpu_index_by_dt_id() to do the mapping in a couple of places.
> >>
> >> To provide a more generic XICS layer, we need to abstract the IRQ
> >> 'server' number and remove any assumption made about its nature. It
> >> should not be used as a 'cpu_index' for lookups, as xics_cpu_setup()
> >> and xics_cpu_destroy() currently do.
> >>
> >> To reach that goal, we introduce an 'icp' backlink under
> >> PowerPCCPU and let the machine core init routine do the ICPState
> >> lookup. The resulting object is stored under PowerPCCPU, which is
> >> then passed on to xics_cpu_setup(). The IRQ 'server' number in XICS
> >> is now generic: sPAPR uses the 'cpu_dt_id' and PowerNV will use the
> >> 'PIR' number.
> >>
> >> This also has the benefit of simplifying the sPAPR hcall routines,
> >> which no longer need to do any ICPState lookups.
> > 
> > Since you've changed the type to a generic Object *, the name needs to
> > be changed to something generic as well.  Maybe 'intc' or
> > 'irq_private'.
> 
> Yes. I think 'intc' is a good choice, unless we make the type
> 'void *', in which case 'irq_private' would be better.
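> 
> For instance, the two options would look something like this in
> PowerPCCPU (just a sketch):
> 
>     Object *intc;        /* keeps the QOM type, checked casts still work */
>     void *irq_private;   /* fully opaque to the common CPU code */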
> 
> I took a quick look at the other machines under ppc. It is not
> obvious how we could use that 'intc' pointer. Maybe for the
> TYPE_OPENPIC and heathrow_pic objects.
> 
> The ppc405 machines could use it to store 'qemu_irq *pic', but
> I am not sure there is much benefit in doing that. To be studied.

Right.  I don't know that it will lead to any easy, quick conversions
of existing platforms.  It will mostly be of use for modern,
high-performance intcs (like XICS, or the IO-APIC/LAPIC); older,
simpler intcs may not have a use for it.  We'll probably want
something like it when we come to implement POWER9's XIVE, though.
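
Roughly, the pattern from this patch should generalize to any
presenter object stashed at machine init time, e.g. (sketch only;
'xive_tctx_get' and 'server' are made up, only the XICS line matches
this series):

    /* sPAPR/XICS, as in this patch (modulo the 'intc' rename) */
    cpu->intc = OBJECT(xics_icp_get(XICS_FABRIC(spapr), CPU(cpu)->cpu_index));

    /* a future XIVE machine would stash its presenter the same way */
    cpu->intc = OBJECT(xive_tctx_get(machine, server));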

> 
> Anyhow I will change the name.
> 
> Thanks,
> 
> C. 
> 
> >>
> >> Signed-off-by: Cédric Le Goater <address@hidden>
> >> ---
> >>
> >> Changes since v2:
> >>
> >>  - changed the 'icp' backlink type to be an 'Object'
> >>
> >>  hw/intc/xics.c          |  4 ++--
> >>  hw/intc/xics_spapr.c    | 20 +++++---------------
> >>  hw/ppc/spapr_cpu_core.c |  5 ++++-
> >>  target/ppc/cpu.h        |  1 +
> >>  4 files changed, 12 insertions(+), 18 deletions(-)
> >>
> >> diff --git a/hw/intc/xics.c b/hw/intc/xics.c
> >> index e740989a1162..bb485cc5b078 100644
> >> --- a/hw/intc/xics.c
> >> +++ b/hw/intc/xics.c
> >> @@ -52,7 +52,7 @@ int xics_get_cpu_index_by_dt_id(int cpu_dt_id)
> >>  void xics_cpu_destroy(XICSFabric *xi, PowerPCCPU *cpu)
> >>  {
> >>      CPUState *cs = CPU(cpu);
> >> -    ICPState *icp = xics_icp_get(xi, cs->cpu_index);
> >> +    ICPState *icp = ICP(cpu->icp);
> >>  
> >>      assert(icp);
> >>      assert(cs == icp->cs);
> >> @@ -65,7 +65,7 @@ void xics_cpu_setup(XICSFabric *xi, PowerPCCPU *cpu)
> >>  {
> >>      CPUState *cs = CPU(cpu);
> >>      CPUPPCState *env = &cpu->env;
> >> -    ICPState *icp = xics_icp_get(xi, cs->cpu_index);
> >> +    ICPState *icp = ICP(cpu->icp);
> >>      ICPStateClass *icpc;
> >>  
> >>      assert(icp);
> >> diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c
> >> index 84d24b2837a7..6144f9876ae3 100644
> >> --- a/hw/intc/xics_spapr.c
> >> +++ b/hw/intc/xics_spapr.c
> >> @@ -43,11 +43,9 @@
> >>  static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> >>                             target_ulong opcode, target_ulong *args)
> >>  {
> >> -    CPUState *cs = CPU(cpu);
> >> -    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), cs->cpu_index);
> >>      target_ulong cppr = args[0];
> >>  
> >> -    icp_set_cppr(icp, cppr);
> >> +    icp_set_cppr(ICP(cpu->icp), cppr);
> >>      return H_SUCCESS;
> >>  }
> >>  
> >> @@ -69,9 +67,7 @@ static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> >>  static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> >>                             target_ulong opcode, target_ulong *args)
> >>  {
> >> -    CPUState *cs = CPU(cpu);
> >> -    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), cs->cpu_index);
> >> -    uint32_t xirr = icp_accept(icp);
> >> +    uint32_t xirr = icp_accept(ICP(cpu->icp));
> >>  
> >>      args[0] = xirr;
> >>      return H_SUCCESS;
> >> @@ -80,9 +76,7 @@ static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> >>  static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> >>                               target_ulong opcode, target_ulong *args)
> >>  {
> >> -    CPUState *cs = CPU(cpu);
> >> -    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), cs->cpu_index);
> >> -    uint32_t xirr = icp_accept(icp);
> >> +    uint32_t xirr = icp_accept(ICP(cpu->icp));
> >>  
> >>      args[0] = xirr;
> >>      args[1] = cpu_get_host_ticks();
> >> @@ -92,21 +86,17 @@ static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> >>  static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> >>                            target_ulong opcode, target_ulong *args)
> >>  {
> >> -    CPUState *cs = CPU(cpu);
> >> -    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), cs->cpu_index);
> >>      target_ulong xirr = args[0];
> >>  
> >> -    icp_eoi(icp, xirr);
> >> +    icp_eoi(ICP(cpu->icp), xirr);
> >>      return H_SUCCESS;
> >>  }
> >>  
> >>  static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> >>                              target_ulong opcode, target_ulong *args)
> >>  {
> >> -    CPUState *cs = CPU(cpu);
> >> -    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), cs->cpu_index);
> >>      uint32_t mfrr;
> >> -    uint32_t xirr = icp_ipoll(icp, &mfrr);
> >> +    uint32_t xirr = icp_ipoll(ICP(cpu->icp), &mfrr);
> >>  
> >>      args[0] = xirr;
> >>      args[1] = mfrr;
> >> diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
> >> index 6883f0991ae9..f9ca3f09a0f8 100644
> >> --- a/hw/ppc/spapr_cpu_core.c
> >> +++ b/hw/ppc/spapr_cpu_core.c
> >> @@ -63,6 +63,8 @@ static void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu,
> >>                             Error **errp)
> >>  {
> >>      CPUPPCState *env = &cpu->env;
> >> +    XICSFabric *xi = XICS_FABRIC(spapr);
> >> +    ICPState *icp = xics_icp_get(xi, CPU(cpu)->cpu_index);
> >>  
> >>      /* Set time-base frequency to 512 MHz */
> >>      cpu_ppc_tb_init(env, SPAPR_TIMEBASE_FREQ);
> >> @@ -80,7 +82,8 @@ static void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu,
> >>          }
> >>      }
> >>  
> >> -    xics_cpu_setup(XICS_FABRIC(spapr), cpu);
> >> +    cpu->icp = OBJECT(icp);
> >> +    xics_cpu_setup(xi, cpu);
> >>  
> >>      qemu_register_reset(spapr_cpu_reset, cpu);
> >>      spapr_cpu_reset(cpu);
> >> diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
> >> index 5ee33b3fd315..774f2d717831 100644
> >> --- a/target/ppc/cpu.h
> >> +++ b/target/ppc/cpu.h
> >> @@ -1196,6 +1196,7 @@ struct PowerPCCPU {
> >>      uint32_t max_compat;
> >>      uint32_t compat_pvr;
> >>      PPCVirtualHypervisor *vhyp;
> >> +    Object *icp;
> >>  
> >>      /* Fields related to migration compatibility hacks */
> >>      bool pre_2_8_migration;
> > 
> 

-- 
David Gibson                    | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au  | minimalist, thank you.  NOT _the_ _other_
                                | _way_ _around_!
http://www.ozlabs.org/~dgibson
