bug-hurd
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH gnumach] smp: Create AP processor set and put all APs inside


From: Samuel Thibault
Subject: Re: [PATCH gnumach] smp: Create AP processor set and put all APs inside it
Date: Sun, 11 Feb 2024 11:55:22 +0100
User-agent: NeoMutt/20170609 (1.8.3)

Almudena Garcia, le dim. 11 févr. 2024 10:48:23 +0000, a écrit:
> I don't understand what the objective of this is. In an SMP system, all 
> processors are equal in the scheduler. Why do you add this difference? 

That's what we had already discussed at the time you introduced the
initial SMP code: there are likely to be races in various parts of
translators etc. So as a first step, it is better to keep all translators
etc. on the BSP and only progressively try to put stuff on the APs.

Samuel

> El domingo 11 de febrero de 2024, Damien Zammit escribió:
> > This has the effect of running with one cpu only with smp,
> > but has the ability to enable APs in userspace with the right
> > processor set RPCs.
> > ---
> >  ddb/db_print.c   |  4 +++-
> >  kern/machine.c   |  7 ++++++-
> >  kern/processor.c | 30 +++++++++++++++++++-----------
> >  kern/processor.h |  1 +
> >  4 files changed, 29 insertions(+), 13 deletions(-)
> > 
> > diff --git a/ddb/db_print.c b/ddb/db_print.c
> > index 028cb887..b5b562fd 100644
> > --- a/ddb/db_print.c
> > +++ b/ddb/db_print.c
> > @@ -347,8 +347,10 @@ db_show_all_runqs(
> >  {
> >     int i;
> >  
> > -   db_printf("Processor set runq:\t");
> > +   db_printf("Processor set def runq:\t");
> >     showrq(&default_pset.runq);
> > +   db_printf("Processor set APs runq:\t");
> > +   showrq(&ap_pset->runq);
> >     for (i = 0; i < smp_get_numcpus(); i++) {
> >         db_printf("Processor #%d runq:\t", i);
> >         showrq(&cpu_to_processor(i)->runq);
> > diff --git a/kern/machine.c b/kern/machine.c
> > index 87fbc4d1..9fbc3871 100644
> > --- a/kern/machine.c
> > +++ b/kern/machine.c
> > @@ -84,6 +84,7 @@ void cpu_up(int cpu)
> >  
> >     processor = cpu_to_processor(cpu);
> >     pset_lock(&default_pset);
> > +   pset_lock(ap_pset);
> >     s = splsched();
> >     processor_lock(processor);
> >  #if        NCPUS > 1
> > @@ -92,10 +93,14 @@ void cpu_up(int cpu)
> >     ms = &machine_slot[cpu];
> >     ms->running = TRUE;
> >     machine_info.avail_cpus++;
> > -   pset_add_processor(&default_pset, processor);
> > +   if (cpu == 0)
> > +           pset_add_processor(&default_pset, processor);
> > +   else
> > +           pset_add_processor(ap_pset, processor);
> >     processor->state = PROCESSOR_RUNNING;
> >     processor_unlock(processor);
> >     splx(s);
> > +   pset_unlock(ap_pset);
> >     pset_unlock(&default_pset);
> >  }
> >  
> > diff --git a/kern/processor.c b/kern/processor.c
> > index 76735381..d151f497 100644
> > --- a/kern/processor.c
> > +++ b/kern/processor.c
> > @@ -60,6 +60,7 @@ struct kmem_cache pset_cache;
> >  int        master_cpu;
> >  
> >  struct processor_set default_pset;
> > +struct processor_set *ap_pset;
> >  
> >  queue_head_t               all_psets;
> >  int                        all_psets_count;
> > @@ -67,16 +68,13 @@ def_simple_lock_data(, all_psets_lock);
> >  
> >  processor_t        master_processor;
> >  
> > -/*
> > - * Bootstrap the processor/pset system so the scheduler can run.
> > - */
> > -void pset_sys_bootstrap(void)
> > +static void pset_sys_bootstrap_real(struct processor_set *pset, int start, 
> > int count)
> >  {
> >     int     i;
> >  
> > -   pset_init(&default_pset);
> > -   default_pset.empty = FALSE;
> > -   for (i = 0; i < NCPUS; i++) {
> > +   pset_init(pset);
> > +   pset->empty = FALSE;
> > +   for (i = start; i < start + count; i++) {
> >             /*
> >              *      Initialize processor data structures.
> >              *      Note that cpu_to_processor is processor_ptr.
> > @@ -86,10 +84,10 @@ void pset_sys_bootstrap(void)
> >     master_processor = cpu_to_processor(master_cpu);
> >     queue_init(&all_psets);
> >     simple_lock_init(&all_psets_lock);
> > -   queue_enter(&all_psets, &default_pset, processor_set_t, all_psets);
> > -   all_psets_count = 1;
> > -   default_pset.active = TRUE;
> > -   default_pset.empty = FALSE;
> > +   queue_enter(&all_psets, pset, processor_set_t, all_psets);
> > +   all_psets_count++;
> > +   pset->active = TRUE;
> > +   pset->empty = FALSE;
> >  
> >     /*
> >      *      Note: the default_pset has a max_priority of BASEPRI_USER.
> > @@ -97,6 +95,14 @@ void pset_sys_bootstrap(void)
> >      */
> >  }
> >  
> > +/*
> > + * Bootstrap the processor/pset system so the scheduler can run.
> > + */
> > +void pset_sys_bootstrap(void)
> > +{
> > +   pset_sys_bootstrap_real(&default_pset, 0, NCPUS);
> > +}
> > +
> >  #if        MACH_HOST
> >  /*
> >   * Rest of pset system initializations.
> > @@ -124,6 +130,8 @@ void pset_sys_init(void)
> >             ipc_processor_init(processor);
> >         }
> >     }
> > +
> > +   processor_set_create(&realhost, &ap_pset, &ap_pset);
> >  }
> >  #endif     /* MACH_HOST */
> >  
> > diff --git a/kern/processor.h b/kern/processor.h
> > index fc204ffa..29abc99a 100644
> > --- a/kern/processor.h
> > +++ b/kern/processor.h
> > @@ -85,6 +85,7 @@ struct processor_set {
> >     long                    sched_load;     /* load avg for scheduler */
> >  };
> >  extern struct processor_set        default_pset;
> > +extern struct processor_set        *ap_pset;
> >  
> >  struct processor {
> >     struct run_queue runq;          /* local runq for this processor */
> > -- 
> > 2.43.0
> > 
> > 
> > 
> >
> 
> -- 
> Enviado desde mi dispositivo Sailfish

-- 
Samuel
---
Pour une évaluation indépendante, transparente et rigoureuse !
Je soutiens la Commission d'Évaluation de l'Inria.



reply via email to

[Prev in Thread] Current Thread [Next in Thread]