[PATCH 3/5 gnumach] Make curr_ipl[] per cpu


From: Damien Zammit
Subject: [PATCH 3/5 gnumach] Make curr_ipl[] per cpu
Date: Mon, 13 Feb 2023 08:49:52 +0000
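
The interrupt priority level was tracked in a single global,
curr_ipl, which cannot work once several CPUs take interrupts:
each processor runs at its own level.  Turn curr_ipl into a
per-CPU array, curr_ipl[NCPUS], indexed by cpu_number() in C and
by the CPU_NUMBER()/CX() macro pair in assembly.  This also gives
x86_64 a real CPU_NUMBER() (reading the local APIC ID), replacing
the previous empty stub, and initialises every slot to SPLHI
wherever the old code initialised the single global.

For illustration only (not part of the patch), the C-level access
pattern after this change looks like the sketch below;
example_raise_splhi() is a hypothetical helper, not a function
added by this series:

    #include <i386/ipl.h>           /* spl_t, curr_ipl[], SPLHI */
    #include <i386/cpu_number.h>    /* cpu_number() */

    /* Raise the calling CPU's IPL to SPLHI and return the old level,
     * touching only this CPU's slot of the per-CPU array.  The real
     * spl*() routines do this with interrupts disabled. */
    static spl_t example_raise_splhi(void)
    {
            spl_t old = curr_ipl[cpu_number()];
            curr_ipl[cpu_number()] = SPLHI;
            return old;             /* restore later with splx(old) */
    }

In assembly the same access is written CX(EXT(curr_ipl),reg) with
the CPU number in reg; CX() scales the index by the element size,
4 bytes on i386 and 8 on x86_64.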

---
 i386/i386/cpu_number.h              | 13 ++++-------
 i386/i386/fpu.c                     |  4 ++--
 i386/i386/ipl.h                     |  2 +-
 i386/i386/pic.c                     |  6 +++--
 i386/i386/spl.S                     | 34 ++++++++++++++++++-----------
 i386/i386at/ioapic.c                |  7 ++++--
 linux/dev/arch/i386/kernel/irq.c    |  4 ++--
 linux/dev/include/asm-i386/system.h |  5 +++--
 x86_64/spl.S                        | 33 +++++++++++++++++-----------
 xen/evt.c                           |  7 ++++--
 10 files changed, 67 insertions(+), 48 deletions(-)

diff --git a/i386/i386/cpu_number.h b/i386/i386/cpu_number.h
index b6c3a629..a5658471 100644
--- a/i386/i386/cpu_number.h
+++ b/i386/i386/cpu_number.h
@@ -32,23 +32,18 @@

 #if    NCPUS > 1

-/* More-specific code must define cpu_number() and CPU_NUMBER.  */
 #ifdef __i386__
 #define        CX(addr, reg)   addr(,reg,4)
+#endif
+#ifdef __x86_64__
+#define        CX(addr, reg)   addr(,reg,8)
+#endif

 #define        CPU_NUMBER(reg) \
        movl    %cs:lapic, reg          ;\
        movl    %cs:APIC_ID(reg), reg   ;\
        shrl    $24, reg                ;\

-
-#endif
-#ifdef __x86_64__
-#define        CX(addr, reg)   addr(,reg,8)
-#warning Missing CPU_NUMBER() for 64 bit
-#define CPU_NUMBER(reg)
-#endif
-
 #ifndef __ASSEMBLER__
 #include "kern/cpu_number.h"
 int cpu_number(void);
diff --git a/i386/i386/fpu.c b/i386/i386/fpu.c
index 36bdb41d..fefe5e49 100644
--- a/i386/i386/fpu.c
+++ b/i386/i386/fpu.c
@@ -60,8 +60,8 @@
 #include <i386/ipl.h>
 #define ASSERT_IPL(L) \
 { \
-      if (curr_ipl != L) { \
-             printf("IPL is %d, expected %d\n", curr_ipl, L); \
+      if (curr_ipl[cpu_number()] != L) { \
+             printf("IPL is %d, expected %d\n", curr_ipl[cpu_number()], L); \
              panic("fpu: wrong ipl"); \
       } \
 }
diff --git a/i386/i386/ipl.h b/i386/i386/ipl.h
index 20e7428b..6e59b368 100644
--- a/i386/i386/ipl.h
+++ b/i386/i386/ipl.h
@@ -76,7 +76,7 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 typedef void (*interrupt_handler_fn)(int);
 extern interrupt_handler_fn ivect[];
 extern int     iunit[];
-extern spl_t   curr_ipl;
+extern spl_t   curr_ipl[NCPUS];
 #endif /* __ASSEMBLER__ */
 #endif /* KERNEL */

diff --git a/i386/i386/pic.c b/i386/i386/pic.c
index 4d51a535..2431c838 100644
--- a/i386/i386/pic.c
+++ b/i386/i386/pic.c
@@ -74,7 +74,7 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 #include <i386/machspl.h>
 #include <i386/pio.h>

-spl_t  curr_ipl;
+spl_t  curr_ipl[NCPUS] = {0};
 int    curr_pic_mask;

 int    iunit[NINTR] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
@@ -112,8 +112,10 @@ picinit(void)
        /*
        ** 0. Initialise the current level to match cli()
        */
+       int i;

-       curr_ipl = SPLHI;
+       for (i = 0; i < NCPUS; i++)
+               curr_ipl[i] = SPLHI;
        curr_pic_mask = 0;

        /*
diff --git a/i386/i386/spl.S b/i386/i386/spl.S
index 215142c9..ee359ca1 100644
--- a/i386/i386/spl.S
+++ b/i386/i386/spl.S
@@ -21,6 +21,7 @@
 #include <i386/ipl.h>
 #include <i386/i386asm.h>
 #include <i386/xen.h>
+#include <i386/cpu_number.h>

 #if NCPUS > 1
 #define mb lock; addl $0,(%esp)
@@ -46,7 +47,8 @@ lock  orl     $1,hyp_shared_info+CPU_PENDING_SEL; /* Yes, activate it */ \

 ENTRY(spl0)
        mb;
-       movl    EXT(curr_ipl),%eax      /* save current ipl */
+       CPU_NUMBER(%edx)
+       movl    CX(EXT(curr_ipl),%edx),%eax     /* save current ipl */
        pushl   %eax
        cli                             /* disable interrupts */
 #ifdef LINUX_DEV
@@ -74,9 +76,10 @@ ENTRY(spl0)
 #endif
        cli                             /* disable interrupts */
 1:
-       cmpl    $(SPL0),EXT(curr_ipl)   /* are we at spl0? */
-       je      1f                      /* yes, all done */
-       movl    $(SPL0),EXT(curr_ipl)   /* set ipl */
+       CPU_NUMBER(%edx)
+       cmpl    $(SPL0),CX(EXT(curr_ipl),%edx)  /* are we at spl0? */
+       je      1f                              /* yes, all done */
+       movl    $(SPL0),CX(EXT(curr_ipl),%edx)  /* set ipl */
 #ifdef MACH_XEN
        movl    EXT(int_mask)+SPL0*4,%eax
                                        /* get xen mask */
@@ -119,16 +122,17 @@ ENTRY(spl7)
        mb;
        /* just clear IF */
        cli
+       CPU_NUMBER(%edx)
        movl    $SPL7,%eax
-       xchgl   EXT(curr_ipl),%eax
+       xchgl   CX(EXT(curr_ipl),%edx),%eax
        ret

 ENTRY(splx)
        movl    S_ARG0,%edx             /* get ipl */
-
+       CPU_NUMBER(%eax)
 #if (MACH_KDB || MACH_TTD) && !defined(MACH_XEN)
        /* First make sure that if we're exitting from ipl7, IF is still cleared */
-       cmpl    $SPL7,EXT(curr_ipl)     /* from ipl7? */
+       cmpl    $SPL7,CX(EXT(curr_ipl),%eax)    /* from ipl7? */
        jne     0f
        pushfl
        popl    %eax
@@ -140,7 +144,8 @@ ENTRY(splx)
 #endif /* (MACH_KDB || MACH_TTD) && !MACH_XEN */
        testl   %edx,%edx               /* spl0? */
        jz      EXT(spl0)               /* yes, handle specially */
-       cmpl    EXT(curr_ipl),%edx      /* same ipl as current? */
+       CPU_NUMBER(%eax)
+       cmpl    CX(EXT(curr_ipl),%eax),%edx     /* same ipl as current? */
        jne     spl                     /* no */
        cmpl    $SPL7,%edx              /* spl7? */
        je      1f                      /* to ipl7, don't enable interrupts */
@@ -188,9 +193,10 @@ splx_cli:
 1:
        xorl    %edx,%edx               /* edx = ipl 0 */
 2:
-       cmpl    EXT(curr_ipl),%edx      /* same ipl as current? */
-       je      1f                      /* yes, all done */
-       movl    %edx,EXT(curr_ipl)      /* set ipl */
+       CPU_NUMBER(%eax)
+       cmpl    CX(EXT(curr_ipl),%eax),%edx     /* same ipl as current? */
+       je      1f                              /* yes, all done */
+       movl    %edx,CX(EXT(curr_ipl),%eax)     /* set ipl */
 #ifdef MACH_XEN
        movl    EXT(int_mask)(,%edx,4),%eax
                                        /* get int mask */
@@ -206,9 +212,10 @@ splx_cli:
        .align  TEXT_ALIGN
        .globl  spl
 spl:
+       CPU_NUMBER(%eax)
 #if (MACH_KDB || MACH_TTD) && !defined(MACH_XEN)
        /* First make sure that if we're exitting from ipl7, IF is still cleared */
-       cmpl    $SPL7,EXT(curr_ipl)     /* from ipl7? */
+       cmpl    $SPL7,CX(EXT(curr_ipl),%eax)    /* from ipl7? */
        jne     0f
        pushfl
        popl    %eax
@@ -225,7 +232,8 @@ spl:
                                        /* get int mask */
 #endif
        cli                             /* disable interrupts */
-       xchgl   EXT(curr_ipl),%edx      /* set ipl */
+       CPU_NUMBER(%edx)
+       xchgl   CX(EXT(curr_ipl),%edx),%edx     /* set ipl */
 #ifdef MACH_XEN
        XEN_SETMASK()                   /* program PICs with new mask */
 #endif
diff --git a/i386/i386at/ioapic.c b/i386/i386at/ioapic.c
index f7b0d1d3..24e04653 100644
--- a/i386/i386at/ioapic.c
+++ b/i386/i386at/ioapic.c
@@ -37,7 +37,7 @@ int duplicate_pin;
 uint32_t lapic_timer_val = 0;
 uint32_t calibrated_ticks = 0;

-spl_t curr_ipl;
+spl_t curr_ipl[NCPUS] = {0};

 int iunit[NINTR] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
                     16, 17, 18, 19, 20, 21, 22, 23};
@@ -76,8 +76,11 @@ interrupt_handler_fn ivect[NINTR] = {
 void
 picdisable(void)
 {
+    int i;
+
     asm("cli");
-    curr_ipl = SPLHI;
+    for (i = 0; i < NCPUS; i++)
+        curr_ipl[i] = SPLHI;

     /*
     ** Disable PIC
diff --git a/linux/dev/arch/i386/kernel/irq.c b/linux/dev/arch/i386/kernel/irq.c
index c10888e3..73802c45 100644
--- a/linux/dev/arch/i386/kernel/irq.c
+++ b/linux/dev/arch/i386/kernel/irq.c
@@ -343,7 +343,7 @@ probe_irq_on (void)
   unsigned i, irqs = 0;
   unsigned long delay;

-  assert (curr_ipl == 0);
+  assert (curr_ipl[cpu_number()] == 0);

   /*
    * Allocate all available IRQs.
@@ -374,7 +374,7 @@ probe_irq_off (unsigned long irqs)
 {
   unsigned int i;

-  assert (curr_ipl == 0);
+  assert (curr_ipl[cpu_number()] == 0);

   irqs &= linux_pic_mask;

diff --git a/linux/dev/include/asm-i386/system.h b/linux/dev/include/asm-i386/system.h
index 41eb65a4..ac091e27 100644
--- a/linux/dev/include/asm-i386/system.h
+++ b/linux/dev/include/asm-i386/system.h
@@ -1,7 +1,8 @@
 #ifndef __ASM_SYSTEM_H
 #define __ASM_SYSTEM_H

-#include <i386/ipl.h> /* curr_ipl, splx */
+#include <i386/ipl.h> /* curr_ipl[], splx */
+#include <i386/cpu_number.h>

 #include <asm/segment.h>

@@ -225,7 +226,7 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
 #define mb()  __asm__ __volatile__ (""   : : :"memory")
 #define __sti() __asm__ __volatile__ ("sti": : :"memory")
 #define __cli() __asm__ __volatile__ ("cli": : :"memory")
-#define __save_flags(x) (x = ((curr_ipl > 0) ? 0 : (1 << 9)))
+#define __save_flags(x) (x = ((curr_ipl[cpu_number()] > 0) ? 0 : (1 << 9)))
 #define __restore_flags(x) splx((x & (1 << 9)) ? 0 : 7)

 #ifdef __SMP__
diff --git a/x86_64/spl.S b/x86_64/spl.S
index 0c2c50cb..89626280 100644
--- a/x86_64/spl.S
+++ b/x86_64/spl.S
@@ -46,7 +46,8 @@ lock  orl     $1,hyp_shared_info+CPU_PENDING_SEL; /* Yes, activate it */ \

 ENTRY(spl0)
        mb;
-       movl    EXT(curr_ipl),%eax      /* save current ipl */
+       CPU_NUMBER(%edx)
+       movl    CX(EXT(curr_ipl),%edx),%eax     /* save current ipl */
        pushq   %rax
        cli                             /* disable interrupts */
 #ifdef LINUX_DEV
@@ -74,9 +75,10 @@ ENTRY(spl0)
 #endif
        cli                             /* disable interrupts */
 1:
-       cmpl    $(SPL0),EXT(curr_ipl)   /* are we at spl0? */
-       je      1f                      /* yes, all done */
-       movl    $(SPL0),EXT(curr_ipl)   /* set ipl */
+       CPU_NUMBER(%edx)
+       cmpl    $(SPL0),CX(EXT(curr_ipl),%edx)  /* are we at spl0? */
+       je      1f                              /* yes, all done */
+       movl    $(SPL0),CX(EXT(curr_ipl),%edx)  /* set ipl */
 #ifdef MACH_XEN
        movl    EXT(int_mask)+SPL0*4,%eax
                                        /* get xen mask */
@@ -119,16 +121,17 @@ ENTRY(spl7)
        mb;
        /* just clear IF */
        cli
+       CPU_NUMBER(%edx)
        movl    $SPL7,%eax
-       xchgl   EXT(curr_ipl),%eax
+       xchgl   CX(EXT(curr_ipl),%edx),%eax
        ret

 ENTRY(splx)
        movq    S_ARG0,%rdx             /* get ipl */
-
+       CPU_NUMBER(%eax)
 #if (MACH_KDB || MACH_TTD) && !defined(MACH_XEN)
        /* First make sure that if we're exitting from ipl7, IF is still cleared */
-       cmpl    $SPL7,EXT(curr_ipl)     /* from ipl7? */
+       cmpl    $SPL7,CX(EXT(curr_ipl),%eax)    /* from ipl7? */
        jne     0f
        pushfq
        popq    %rax
@@ -140,7 +143,8 @@ ENTRY(splx)
 #endif /* (MACH_KDB || MACH_TTD) && !MACH_XEN */
        testl   %edx,%edx               /* spl0? */
        jz      EXT(spl0)               /* yes, handle specially */
-       cmpl    EXT(curr_ipl),%edx      /* same ipl as current? */
+       CPU_NUMBER(%eax)
+       cmpl    CX(EXT(curr_ipl),%eax),%edx     /* same ipl as current? */
        jne     spl                     /* no */
        cmpl    $SPL7,%edx              /* spl7? */
        je      1f                      /* to ipl7, don't enable interrupts */
@@ -188,9 +192,10 @@ splx_cli:
 1:
        xorl    %edx,%edx               /* edx = ipl 0 */
 2:
-       cmpl    EXT(curr_ipl),%edx      /* same ipl as current? */
-       je      1f                      /* yes, all done */
-       movl    %edx,EXT(curr_ipl)      /* set ipl */
+       CPU_NUMBER(%eax)
+       cmpl    CX(EXT(curr_ipl),%eax),%edx     /* same ipl as current? */
+       je      1f                              /* yes, all done */
+       movl    %edx,CX(EXT(curr_ipl),%eax)     /* set ipl */
 #ifdef MACH_XEN
        movl    EXT(int_mask)(,%edx,4),%eax
                                        /* get int mask */
@@ -206,9 +211,10 @@ splx_cli:
        .align  TEXT_ALIGN
        .globl  spl
 spl:
+       CPU_NUMBER(%eax)
 #if (MACH_KDB || MACH_TTD) && !defined(MACH_XEN)
        /* First make sure that if we're exitting from ipl7, IF is still cleared */
-       cmpl    $SPL7,EXT(curr_ipl)     /* from ipl7? */
+       cmpl    $SPL7,CX(EXT(curr_ipl),%eax)    /* from ipl7? */
        jne     0f
        pushfq
        popq    %rax
@@ -225,7 +231,8 @@ spl:
                                        /* get int mask */
 #endif
        cli                             /* disable interrupts */
-       xchgl   EXT(curr_ipl),%edx      /* set ipl */
+       CPU_NUMBER(%eax)
+       xchgl   CX(EXT(curr_ipl),%eax),%edx     /* set ipl */
 #ifdef MACH_XEN
        XEN_SETMASK()                   /* program PICs with new mask */
 #endif
diff --git a/xen/evt.c b/xen/evt.c
index cec78c0d..e0c93744 100644
--- a/xen/evt.c
+++ b/xen/evt.c
@@ -28,7 +28,7 @@
 #define NEVNT (sizeof(unsigned long) * sizeof(unsigned long) * 8)
 int    int_mask[NSPL];

-spl_t curr_ipl;
+spl_t curr_ipl[NCPUS];

 interrupt_handler_fn ivect[NEVNT];
 int intpri[NEVNT];
@@ -92,8 +92,11 @@ extern void hyp_callback(void);
 extern void hyp_failsafe_callback(void);

 void hyp_intrinit(void) {
+       int i;
+
        form_int_mask();
-       curr_ipl = SPLHI;
+       for (i = 0; i < NCPUS; i++)
+               curr_ipl[i] = SPLHI;
        hyp_shared_info.evtchn_mask[0] = int_mask[SPLHI];
 #ifdef __i386__
        hyp_set_callbacks(KERNEL_CS, hyp_callback,
--
2.34.1
