Re: [Qemu-devel] [RFC PATCH v2.1 10/12] spapr: CPU hotplug support


From: Bharata B Rao
Subject: Re: [Qemu-devel] [RFC PATCH v2.1 10/12] spapr: CPU hotplug support
Date: Thu, 5 May 2016 14:52:44 +0530
User-agent: Mutt/1.5.23 (2014-03-12)

On Tue, Apr 05, 2016 at 06:47:16PM -0500, Michael Roth wrote:
> Quoting Bharata B Rao (2016-03-31 03:39:19)
> > Set up device tree entries for the hotplugged CPU core and use the
> > existing RTAS event logging infrastructure to send a CPU hotplug notification
> > to the guest.
> > 
> > Signed-off-by: Bharata B Rao <address@hidden>
> > ---
> >  hw/ppc/spapr.c                  | 58 ++++++++++++++++++++++++++++++++++
> >  hw/ppc/spapr_cpu_core.c         | 70 +++++++++++++++++++++++++++++++++++++++++
> >  hw/ppc/spapr_events.c           |  3 ++
> >  hw/ppc/spapr_rtas.c             | 24 ++++++++++++++
> >  include/hw/ppc/spapr.h          |  2 ++
> >  include/hw/ppc/spapr_cpu_core.h |  2 ++
> >  6 files changed, 159 insertions(+)
> > 
> > diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> > index 1ead043..1a5dbd9 100644
> > --- a/hw/ppc/spapr.c
> > +++ b/hw/ppc/spapr.c
> > @@ -603,6 +603,16 @@ static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
> >      size_t page_sizes_prop_size;
> >      uint32_t vcpus_per_socket = smp_threads * smp_cores;
> >      uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
> > +    sPAPRDRConnector *drc;
> > +    sPAPRDRConnectorClass *drck;
> > +    int drc_index;
> > +
> > +    drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index);
> > +    if (drc) {
> > +        drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
> > +        drc_index = drck->get_index(drc);
> > +        _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
> > +    }
> > 
> >      /* Note: we keep CI large pages off for now because a 64K capable guest
> >       * provisioned with large pages might otherwise try to map a qemu
> > @@ -987,6 +997,16 @@ static void spapr_finalize_fdt(sPAPRMachineState *spapr,
> >          _FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
> >      }
> > 
> > +    if (smc->dr_cpu_enabled) {
> > +        int offset = fdt_path_offset(fdt, "/cpus");
> > +        ret = spapr_drc_populate_dt(fdt, offset, NULL,
> > +                                    SPAPR_DR_CONNECTOR_TYPE_CPU);
> > +        if (ret < 0) {
> > +            error_report("Couldn't set up CPU DR device tree properties");
> > +            exit(1);
> > +        }
> > +    }
> > +
> >      _FDT((fdt_pack(fdt)));
> > 
> >      if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
> > @@ -1622,6 +1642,8 @@ static void spapr_boot_set(void *opaque, const char *boot_device,
> >  void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu, Error **errp)
> >  {
> >      CPUPPCState *env = &cpu->env;
> > +    CPUState *cs = CPU(cpu);
> > +    int i;
> > 
> >      /* Set time-base frequency to 512 MHz */
> >      cpu_ppc_tb_init(env, TIMEBASE_FREQ);
> > @@ -1646,6 +1668,14 @@ void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu, Error **errp)
> >          }
> >      }
> > 
> > +    /* Set NUMA node for the added CPUs  */
> > +    for (i = 0; i < nb_numa_nodes; i++) {
> > +        if (test_bit(cs->cpu_index, numa_info[i].node_cpu)) {
> > +            cs->numa_node = i;
> > +            break;
> > +        }
> > +    }
> > +
> >      xics_cpu_setup(spapr->icp, cpu);
> > 
> >      qemu_register_reset(spapr_cpu_reset, cpu);
> > @@ -1825,6 +1855,11 @@ static void ppc_spapr_init(MachineState *machine)
> > 
> >          for (i = 0; i < spapr_max_cores; i++) {
> >              int core_dt_id = i * smt;
> > +            sPAPRDRConnector *drc =
> > +                spapr_dr_connector_new(OBJECT(spapr),
> > +                                       SPAPR_DR_CONNECTOR_TYPE_CPU, core_dt_id);
> > +
> > +            qemu_register_reset(spapr_drc_reset, drc);
> > 
> >              if (i < spapr_cores) {
> >                  char *type = spapr_get_cpu_core_type(machine->cpu_model);
> > @@ -2247,6 +2282,27 @@ out:
> >      error_propagate(errp, local_err);
> >  }
> > 
> > +void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
> > +                                    sPAPRMachineState *spapr)
> > +{
> > +    PowerPCCPU *cpu = POWERPC_CPU(cs);
> > +    DeviceClass *dc = DEVICE_GET_CLASS(cs);
> > +    int id = ppc_get_vcpu_dt_id(cpu);
> > +    void *fdt;
> > +    int offset, fdt_size;
> > +    char *nodename;
> > +
> > +    fdt = create_device_tree(&fdt_size);
> > +    nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
> > +    offset = fdt_add_subnode(fdt, 0, nodename);
> > +
> > +    spapr_populate_cpu_dt(cs, fdt, offset, spapr);
> > +    g_free(nodename);
> > +
> > +    *fdt_offset = offset;
> > +    return fdt;
> > +}
> > +
> >  static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
> >                                        DeviceState *dev, Error **errp)
> >  {
> > @@ -2287,6 +2343,8 @@ static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
> >          }
> > 
> >          spapr_memory_plug(hotplug_dev, dev, node, errp);
> > +    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
> > +        spapr_core_plug(hotplug_dev, dev, errp);
> >      }
> >  }
> > 
> > diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
> > index 640d143..a9ba843 100644
> > --- a/hw/ppc/spapr_cpu_core.c
> > +++ b/hw/ppc/spapr_cpu_core.c
> > @@ -18,6 +18,7 @@
> >  void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
> >                           Error **errp)
> >  {
> > +    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(qdev_get_machine());
> >      sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
> >      int spapr_max_cores = max_cpus / smp_threads;
> >      int index;
> > @@ -25,6 +26,11 @@ void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
> >      Error *local_err = NULL;
> >      CPUCore *cc = CPU_CORE(dev);
> > 
> > +    if (!smc->dr_cpu_enabled && dev->hotplugged) {
> > +        error_setg(&local_err, "CPU hotplug not supported for this machine");
> > +        goto out;
> > +    }
> > +
> >      if (cc->threads != smp_threads) {
> >          error_setg(&local_err, "threads must be %d", smp_threads);
> >          goto out;
> > @@ -49,6 +55,70 @@ out:
> >      error_propagate(errp, local_err);
> >  }
> > 
> > +void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
> > +                     Error **errp)
> > +{
> > +    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(qdev_get_machine());
> > +    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
> > +    sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
> > +    CPUCore *cc = CPU_CORE(dev);
> > +    CPUState *cs = CPU(&core->threads[0]);
> > +    sPAPRDRConnector *drc;
> > +    sPAPRDRConnectorClass *drck;
> > +    Error *local_err = NULL;
> > +    void *fdt = NULL;
> > +    int fdt_offset = 0;
> > +    int index;
> > +    int smt = kvmppc_smt_threads();
> > +
> > +    drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, cc->core);
> > +    index = cc->core / smt;
> > +    spapr->cores[index] = OBJECT(dev);
> > +
> > +    if (!smc->dr_cpu_enabled) {
> > +        /*
> > +         * This is a coldplugged CPU core, but the machine doesn't support
> > +         * DR. So skip the hotplug path, ensuring that the core is brought
> > +         * online without an associated DR connector.
> > +         */
> > +        return;
> > +    }
> > +
> > +    g_assert(drc);
> > +
> > +    /*
> > +     * Set up CPU DT entries only for hotplugged CPUs. For boot-time or
> > +     * coldplugged CPUs, DT entries are set up in spapr_finalize_fdt().
> > +     */
> > +    if (dev->hotplugged) {
> > +        fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
> > +        dev->hotplugged = true;
> 
> This doesn't seem necessary ^

Yes, this hunk refuses to die; I have now made sure it is finally removed.
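
For reference, here is a minimal sketch of how I expect the hotplug branch
to read once that stray assignment is gone. The attach/notify calls below
are my assumption of the surrounding code (mirroring the existing memory
hotplug path) and are not part of the quoted hunk:

    if (dev->hotplugged) {
        fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
    }

    drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
    drck->attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, &local_err);
    if (local_err) {
        g_free(fdt);
        error_propagate(errp, local_err);
        return;
    }

    if (dev->hotplugged) {
        /* Notify the guest through the existing RTAS event logging path */
        spapr_hotplug_req_add_by_index(drc);
    }

That keeps dev->hotplugged read-only inside the plug path. With the series
applied, a core would then be hotplugged from the monitor with something
like "device_add POWER8_v2.0-spapr-cpu-core,id=core1,core=8" (type name and
property spelling assumed from this series).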

Regards,
Bharata.



