From: Samuel Thibault
Subject: [hurd, committed 2/7] hurd: make lll_* take a variable instead of a ptr
Date: Wed, 16 Dec 2020 01:59:39 +0100

To be consistent with the other ports, let's make the lll_* macros take
a variable, and rename the variants that still take a pointer to __lll_*.
---
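
In short: the lll_* entry points now take the lock variable itself and
take its address internally, while the __lll_* forms keep the pointer
interface.  A minimal sketch of the resulting convention (assuming a
plain int lock word, as the macros in mach/lowlevellock.h below expect):

  static int lock;   /* 0 = unlocked */

  static void
  with_lock (void)
  {
    lll_lock (lock, 0);      /* expands to __lll_lock (&lock, 0) */
    /* ... critical section ... */
    lll_unlock (lock, 0);    /* expands to __lll_unlock (&lock, 0) */
  }

  /* Code that already holds a pointer keeps using the __lll_* forms.  */
  static void
  with_lock_ptr (int *lockp)
  {
    __lll_lock (lockp, 0);
    /* ... critical section ... */
    __lll_unlock (lockp, 0);
  }
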
 hurd/hurdlock.c                            | 14 ++++----
 hurd/hurdlock.h                            | 38 +++++++++++++---------
 hurd/hurdpid.c                             |  2 +-
 mach/lock-intern.h                         |  6 ++--
 mach/lowlevellock.h                        | 24 ++++++++++----
 sysdeps/mach/hurd/htl/pt-mutex-lock.c      |  8 ++---
 sysdeps/mach/hurd/htl/pt-mutex-timedlock.c |  6 ++--
 sysdeps/mach/hurd/htl/pt-mutex-trylock.c   |  8 ++---
 sysdeps/mach/hurd/htl/pt-mutex-unlock.c    |  8 ++---
 sysdeps/mach/hurd/htl/pt-mutex.h           |  2 +-
 sysdeps/mach/hurd/setpgid.c                |  2 +-
 sysdeps/mach/hurd/setsid.c                 |  2 +-
 sysdeps/mach/hurd/tls.h                    |  4 +--
 sysdeps/mach/libc-lock.h                   | 12 +++----
 14 files changed, 76 insertions(+), 60 deletions(-)

diff --git a/hurd/hurdlock.c b/hurd/hurdlock.c
index 59d017fc02..3b9974bee5 100644
--- a/hurd/hurdlock.c
+++ b/hurd/hurdlock.c
@@ -51,7 +51,7 @@ __lll_abstimed_wait (void *ptr, int val,
     return EINVAL;
 
   int mlsec = compute_reltime (tsp, clk);
-  return mlsec < 0 ? KERN_TIMEDOUT : lll_timed_wait (ptr, val, mlsec, flags);
+  return mlsec < 0 ? KERN_TIMEDOUT : __lll_timed_wait (ptr, val, mlsec, flags);
 }
 
 int
@@ -62,7 +62,7 @@ __lll_abstimed_xwait (void *ptr, int lo, int hi,
     return EINVAL;
 
   int mlsec = compute_reltime (tsp, clk);
-  return mlsec < 0 ? KERN_TIMEDOUT : lll_timed_xwait (ptr, lo, hi, mlsec,
+  return mlsec < 0 ? KERN_TIMEDOUT : __lll_timed_xwait (ptr, lo, hi, mlsec,
                                                      flags);
 }
 
@@ -73,7 +73,7 @@ __lll_abstimed_lock (void *ptr,
   if (clk != CLOCK_REALTIME)
     return EINVAL;
 
-  if (lll_trylock (ptr) == 0)
+  if (__lll_trylock (ptr) == 0)
     return 0;
 
   while (1)
@@ -84,7 +84,7 @@ __lll_abstimed_lock (void *ptr,
         return EINVAL;
 
       int mlsec = compute_reltime (tsp, clk);
-      if (mlsec < 0 || lll_timed_wait (ptr, 2, mlsec, flags) == KERN_TIMEDOUT)
+      if (mlsec < 0 || __lll_timed_wait (ptr, 2, mlsec, flags) == KERN_TIMEDOUT)
         return ETIMEDOUT;
     }
 }
@@ -140,7 +140,7 @@ __lll_robust_lock (void *ptr, int flags)
         }
       else
         {
-          lll_timed_wait (iptr, val, wait_time, flags);
+          __lll_timed_wait (iptr, val, wait_time, flags);
           if (wait_time < MAX_WAIT_TIME)
             wait_time <<= 1;
         }
@@ -187,7 +187,7 @@ __lll_robust_abstimed_lock (void *ptr,
           else if (mlsec > wait_time)
             mlsec = wait_time;
 
-          int res = lll_timed_wait (iptr, val, mlsec, flags);
+          int res = __lll_timed_wait (iptr, val, mlsec, flags);
           if (res == KERN_TIMEDOUT)
             return ETIMEDOUT;
           else if (wait_time < MAX_WAIT_TIME)
@@ -223,7 +223,7 @@ __lll_robust_unlock (void *ptr, int flags)
     {
       if (val & LLL_WAITERS)
         {
-          lll_set_wake (ptr, 0, flags);
+          __lll_set_wake (ptr, 0, flags);
           break;
         }
       else if (atomic_compare_exchange_weak_release ((unsigned int *)ptr, &val, 0))
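
A note on the wait loops above: a robust lock's owner can die without
ever unlocking, so waiters must not sleep unboundedly.  The code
therefore sleeps with a timeout, re-checks the owner on each wakeup,
and doubles the timeout each round.  A compilable model of that shape
(try_acquire, owner_alive, and both timeout values are hypothetical
stand-ins for this file's definitions; __lll_timed_wait is the macro
from hurdlock.h below):

  #include <stdbool.h>

  #define MODEL_MAX_WAIT_TIME 1500   /* assumed cap, in milliseconds */

  extern bool try_acquire (int *iptr);   /* stand-in for the CAS */
  extern bool owner_alive (int *iptr);   /* stand-in for the liveness check */

  static void
  robust_lock_model (int *iptr, int val, int flags)
  {
    int wait_time = 25;   /* assumed initial timeout */
    while (!try_acquire (iptr) && owner_alive (iptr))
      {
        /* A dead owner never wakes us: sleep at most wait_time ms,
           then re-check, backing off exponentially.  */
        __lll_timed_wait (iptr, val, wait_time, flags);
        if (wait_time < MODEL_MAX_WAIT_TIME)
          wait_time <<= 1;
      }
  }
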
diff --git a/hurd/hurdlock.h b/hurd/hurdlock.h
index 362bcf6cc2..c1df42bea4 100644
--- a/hurd/hurdlock.h
+++ b/hurd/hurdlock.h
@@ -31,21 +31,21 @@ struct timespec;
 
 /* Wait on 64-bit address PTR, without blocking if its contents
    are different from the pair <LO, HI>.  */
-#define lll_xwait(ptr, lo, hi, flags) \
+#define __lll_xwait(ptr, lo, hi, flags) \
   __gsync_wait (__mach_task_self (), \
     (vm_offset_t)ptr, lo, hi, 0, flags | GSYNC_QUAD)
 
-/* Same as 'lll_wait', but only block for MLSEC milliseconds.  */
-#define lll_timed_wait(ptr, val, mlsec, flags) \
+/* Same as '__lll_wait', but only block for MLSEC milliseconds.  */
+#define __lll_timed_wait(ptr, val, mlsec, flags) \
   __gsync_wait (__mach_task_self (), \
     (vm_offset_t)ptr, val, 0, mlsec, flags | GSYNC_TIMED)
 
-/* Same as 'lll_xwait', but only block for MLSEC milliseconds.  */
-#define lll_timed_xwait(ptr, lo, hi, mlsec, flags) \
+/* Same as '__lll_xwait', but only block for MLSEC milliseconds.  */
+#define __lll_timed_xwait(ptr, lo, hi, mlsec, flags) \
   __gsync_wait (__mach_task_self (), (vm_offset_t)ptr, \
     lo, hi, mlsec, flags | GSYNC_TIMED | GSYNC_QUAD)
 
-/* Same as 'lll_wait', but only block until TSP elapses,
+/* Same as '__lll_wait', but only block until TSP elapses,
    using clock CLK.  */
 extern int __lll_abstimed_wait (void *__ptr, int __val,
   const struct timespec *__tsp, int __flags, int __clk);
@@ -63,6 +63,8 @@ extern int __lll_abstimed_lock (void *__ptr,
 /* Acquire the lock at PTR, but return with an error if
    the process containing the owner thread dies.  */
 extern int __lll_robust_lock (void *__ptr, int __flags);
+#define lll_robust_lock(var, flags) \
+  __lll_robust_lock (&(var), flags)
 
 /* Same as '__lll_robust_lock', but only block until TSP
    elapses, using clock CLK.  */
@@ -72,19 +74,23 @@ extern int __lll_robust_abstimed_lock (void *__ptr,
 /* Same as '__lll_robust_lock', but return with an error
    if the lock cannot be acquired without blocking.  */
 extern int __lll_robust_trylock (void *__ptr);
+#define lll_robust_trylock(var) \
+  __lll_robust_trylock (&(var))
 
 /* Wake one or more threads waiting on address PTR,
    setting its value to VAL before doing so.  */
-#define lll_set_wake(ptr, val, flags) \
+#define __lll_set_wake(ptr, val, flags) \
   __gsync_wake (__mach_task_self (), \
     (vm_offset_t)ptr, val, flags | GSYNC_MUTATE)
 
 /* Release the robust lock at PTR.  */
 extern void __lll_robust_unlock (void *__ptr, int __flags);
+#define lll_robust_unlock(var, flags) \
+  __lll_robust_unlock (&(var), flags)
 
 /* Rearrange threads waiting on address SRC to instead wait on
    DST, waking one of them if WAIT_ONE is non-zero.  */
-#define lll_requeue(src, dst, wake_one, flags) \
+#define __lll_requeue(src, dst, wake_one, flags) \
   __gsync_requeue (__mach_task_self (), (vm_offset_t)src, \
     (vm_offset_t)dst, (boolean_t)wake_one, flags)
 
@@ -93,31 +99,31 @@ extern void __lll_robust_unlock (void *__ptr, int __flags);
    every one of these calls, defaulting to CLOCK_REALTIME if
    no argument is passed.  */
 
-#define lll_abstimed_wait(ptr, val, tsp, flags, ...)   \
+#define lll_abstimed_wait(var, val, tsp, flags, ...)   \
   ({   \
      const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
-     __lll_abstimed_wait ((ptr), (val), (tsp), (flags),   \
+     __lll_abstimed_wait (&(var), (val), (tsp), (flags),   \
        __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
    })
 
-#define lll_abstimed_xwait(ptr, lo, hi, tsp, flags, ...)   \
+#define lll_abstimed_xwait(var, lo, hi, tsp, flags, ...)   \
   ({   \
      const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
-     __lll_abstimed_xwait ((ptr), (lo), (hi), (tsp), (flags),   \
+     __lll_abstimed_xwait (&(var), (lo), (hi), (tsp), (flags),   \
        __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
    })
 
-#define lll_abstimed_lock(ptr, tsp, flags, ...)   \
+#define lll_abstimed_lock(var, tsp, flags, ...)   \
   ({   \
      const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
-     __lll_abstimed_lock ((ptr), (tsp), (flags),   \
+     __lll_abstimed_lock (&(var), (tsp), (flags),   \
        __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
    })
 
-#define lll_robust_abstimed_lock(ptr, tsp, flags, ...)   \
+#define lll_robust_abstimed_lock(var, tsp, flags, ...)   \
   ({   \
      const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
-     __lll_robust_abstimed_lock ((ptr), (tsp), (flags),   \
+     __lll_robust_abstimed_lock (&(var), (tsp), (flags),   \
        __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
    })
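
The trailing ##__VA_ARGS__ trick above is what makes the clock argument
optional: the array is initialized with CLOCK_REALTIME followed by
whatever the caller passed, and indexing the last element yields the
caller's clock when present, the default otherwise.  A self-contained
illustration of the same idiom (DEFAULT_TO_REALTIME is a hypothetical
name; like the originals, it relies on the GNU ##__VA_ARGS__ and
statement-expression extensions):

  #include <stdio.h>
  #include <time.h>

  #define DEFAULT_TO_REALTIME(...)   \
    ({   \
       const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1];   \
     })

  int
  main (void)
  {
    printf ("%d\n", (int) DEFAULT_TO_REALTIME ());                 /* default */
    printf ("%d\n", (int) DEFAULT_TO_REALTIME (CLOCK_MONOTONIC));  /* caller's */
    return 0;
  }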
 
diff --git a/hurd/hurdpid.c b/hurd/hurdpid.c
index dd8281cda7..5607570d4b 100644
--- a/hurd/hurdpid.c
+++ b/hurd/hurdpid.c
@@ -66,7 +66,7 @@ _S_msg_proc_newids (mach_port_t me,
 
  /* Notify any waiting user threads that the id change has been completed.  */
   ++_hurd_pids_changed_stamp;
-  lll_wake (&_hurd_pids_changed_stamp, GSYNC_BROADCAST);
+  lll_wake (_hurd_pids_changed_stamp, GSYNC_BROADCAST);
 
   return 0;
 }
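
The stamp here is a generation counter: the setpgid/setsid paths below
sample it, ask the proc server for the id change, and then sleep only
while the stamp still holds the sampled value, so a wake that arrives
first cannot be lost.  In outline (declarations abridged):

  extern unsigned int _hurd_pids_changed_stamp;

  /* Signal thread, as in _S_msg_proc_newids above: publish, then wake.  */
  static void
  publish_id_change (void)
  {
    ++_hurd_pids_changed_stamp;
    lll_wake (_hurd_pids_changed_stamp, GSYNC_BROADCAST);
  }

  /* User thread, as in setpgid/setsid below: stamp was sampled before
     the request; lll_wait returns at once if the stamp already moved.  */
  static void
  wait_for_id_change (unsigned int stamp)
  {
    while (_hurd_pids_changed_stamp == stamp)
      lll_wait (_hurd_pids_changed_stamp, stamp, 0);
  }
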
diff --git a/mach/lock-intern.h b/mach/lock-intern.h
index 62faf98039..77fc4d17f8 100644
--- a/mach/lock-intern.h
+++ b/mach/lock-intern.h
@@ -57,7 +57,7 @@ extern void __spin_lock (__spin_lock_t *__lock);
 _EXTERN_INLINE void
 __spin_lock (__spin_lock_t *__lock)
 {
-  lll_lock (__lock, 0);
+  __lll_lock (__lock, 0);
 }
 #endif
 
@@ -68,7 +68,7 @@ extern void __spin_unlock (__spin_lock_t *__lock);
 _EXTERN_INLINE void
 __spin_unlock (__spin_lock_t *__lock)
 {
-  lll_unlock (__lock, 0);
+  __lll_unlock (__lock, 0);
 }
 #endif
 
@@ -79,7 +79,7 @@ extern int __spin_try_lock (__spin_lock_t *__lock);
 _EXTERN_INLINE int
 __spin_try_lock (__spin_lock_t *__lock)
 {
-  return (lll_trylock (__lock) == 0);
+  return (__lll_trylock (__lock) == 0);
 }
 #endif
 
diff --git a/mach/lowlevellock.h b/mach/lowlevellock.h
index cf67ccd589..0a22a030b4 100644
--- a/mach/lowlevellock.h
+++ b/mach/lowlevellock.h
@@ -36,16 +36,20 @@
 
 /* Wait on address PTR, without blocking if its contents
  * are different from VAL.  */
-#define lll_wait(ptr, val, flags)   \
+#define __lll_wait(ptr, val, flags)   \
   __gsync_wait (__mach_task_self (),   \
     (vm_offset_t)(ptr), (val), 0, 0, (flags))
+#define lll_wait(var, val, flags) \
+  __lll_wait (&(var), val, flags)
 
 /* Wake one or more threads waiting on address PTR.  */
-#define lll_wake(ptr, flags)   \
+#define __lll_wake(ptr, flags)   \
   __gsync_wake (__mach_task_self (), (vm_offset_t)(ptr), 0, (flags))
+#define lll_wake(var, flags) \
+  __lll_wake (&(var), flags)
 
 /* Acquire the lock at PTR.  */
-#define lll_lock(ptr, flags)   \
+#define __lll_lock(ptr, flags)   \
   ({   \
      int *__iptr = (int *)(ptr);   \
      int __flags = (flags);   \
@@ -55,27 +59,33 @@
          {   \
            if (atomic_exchange_acq (__iptr, 2) == 0)   \
              break;   \
-           lll_wait (__iptr, 2, __flags);   \
+           __lll_wait (__iptr, 2, __flags);   \
          }   \
      (void)0;   \
    })
+#define lll_lock(var, flags) \
+  __lll_lock (&(var), flags)
 
 /* Try to acquire the lock at PTR, without blocking.
    Evaluates to zero on success.  */
-#define lll_trylock(ptr)   \
+#define __lll_trylock(ptr)   \
   ({   \
      int *__iptr = (int *)(ptr);   \
      *__iptr == 0   \
        && atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) == 0 ? 0 : -1;   \
    })
+#define lll_trylock(var) \
+  __lll_trylock (&(var))
 
 /* Release the lock at PTR.  */
-#define lll_unlock(ptr, flags)   \
+#define __lll_unlock(ptr, flags)   \
   ({   \
      int *__iptr = (int *)(ptr);   \
      if (atomic_exchange_rel (__iptr, 0) == 2)   \
-       lll_wake (__iptr, (flags));   \
+       __lll_wake (__iptr, (flags));   \
      (void)0;   \
    })
+#define lll_unlock(var, flags) \
+  __lll_unlock (&(var), flags)
 
 #endif
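
The lock word protocol above is the usual three-state futex scheme:
0 is free, 1 is taken with no waiters, 2 is taken with possible
waiters, and unlock only issues a wake when it observes 2.  A
self-contained model in C11 atomics, with busy-waiting standing in
for the __gsync_wait sleep (illustration only, not part of the patch):

  #include <stdatomic.h>

  static atomic_int word;   /* 0 free, 1 taken, 2 taken + waiters */

  static void
  model_lock (void)
  {
    int free_ = 0;
    if (atomic_compare_exchange_strong (&word, &free_, 1))
      return;                                /* fast path: 0 -> 1 */
    while (atomic_exchange (&word, 2) != 0)  /* mark contended */
      ;  /* the real code sleeps here: __lll_wait (&word, 2, flags) */
  }

  static void
  model_unlock (void)
  {
    if (atomic_exchange (&word, 0) == 2)     /* waiters possible?  */
      ;  /* the real code wakes one: __lll_wake (&word, flags) */
  }
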
diff --git a/sysdeps/mach/hurd/htl/pt-mutex-lock.c b/sysdeps/mach/hurd/htl/pt-mutex-lock.c
index 22510701d8..ed1f6c13a1 100644
--- a/sysdeps/mach/hurd/htl/pt-mutex-lock.c
+++ b/sysdeps/mach/hurd/htl/pt-mutex-lock.c
@@ -33,7 +33,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
   switch (MTX_TYPE (mtxp))
     {
     case PT_MTX_NORMAL:
-      lll_lock (&mtxp->__lock, flags);
+      lll_lock (mtxp->__lock, flags);
       break;
 
     case PT_MTX_RECURSIVE:
@@ -47,7 +47,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
          return ret;
        }
 
-      lll_lock (&mtxp->__lock, flags);
+      lll_lock (mtxp->__lock, flags);
       mtx_set_owner (mtxp, self, flags);
       mtxp->__cnt = 1;
       break;
@@ -57,7 +57,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
       if (mtx_owned_p (mtxp, self, flags))
        return EDEADLK;
 
-      lll_lock (&mtxp->__lock, flags);
+      lll_lock (mtxp->__lock, flags);
       mtx_set_owner (mtxp, self, flags);
       break;
 
@@ -65,7 +65,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
     case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST:
     case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
       self = _pthread_self ();
-      ROBUST_LOCK (self, mtxp, __lll_robust_lock, flags);
+      ROBUST_LOCK (self, mtxp, lll_robust_lock, flags);
       break;
 
     default:
diff --git a/sysdeps/mach/hurd/htl/pt-mutex-timedlock.c b/sysdeps/mach/hurd/htl/pt-mutex-timedlock.c
index 198b340429..965e8b24fb 100644
--- a/sysdeps/mach/hurd/htl/pt-mutex-timedlock.c
+++ b/sysdeps/mach/hurd/htl/pt-mutex-timedlock.c
@@ -34,7 +34,7 @@ __pthread_mutex_clocklock (pthread_mutex_t *mtxp,
   switch (MTX_TYPE (mtxp))
     {
     case PT_MTX_NORMAL:
-      ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags, clockid);
+      ret = lll_abstimed_lock (mtxp->__lock, tsp, flags, clockid);
       break;
 
     case PT_MTX_RECURSIVE:
@@ -47,7 +47,7 @@ __pthread_mutex_clocklock (pthread_mutex_t *mtxp,
          ++mtxp->__cnt;
          ret = 0;
        }
-      else if ((ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags, clockid)) == 0)
+      else if ((ret = lll_abstimed_lock (mtxp->__lock, tsp, flags, clockid)) == 0)
        {
          mtx_set_owner (mtxp, self, flags);
          mtxp->__cnt = 1;
@@ -59,7 +59,7 @@ __pthread_mutex_clocklock (pthread_mutex_t *mtxp,
       self = _pthread_self ();
       if (mtx_owned_p (mtxp, self, flags))
        ret = EDEADLK;
-      else if ((ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags, clockid)) == 0)
+      else if ((ret = lll_abstimed_lock (mtxp->__lock, tsp, flags, clockid)) == 0)
        mtx_set_owner (mtxp, self, flags);
 
       break;
diff --git a/sysdeps/mach/hurd/htl/pt-mutex-trylock.c b/sysdeps/mach/hurd/htl/pt-mutex-trylock.c
index f883ec3f30..62183b0299 100644
--- a/sysdeps/mach/hurd/htl/pt-mutex-trylock.c
+++ b/sysdeps/mach/hurd/htl/pt-mutex-trylock.c
@@ -32,7 +32,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
   switch (MTX_TYPE (mtxp))
     {
     case PT_MTX_NORMAL:
-      ret = lll_trylock (&mtxp->__lock);
+      ret = lll_trylock (mtxp->__lock);
       if (ret)
        ret = EBUSY;
       break;
@@ -47,7 +47,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
          ++mtxp->__cnt;
          ret = 0;
        }
-      else if ((ret = lll_trylock (&mtxp->__lock)) == 0)
+      else if ((ret = lll_trylock (mtxp->__lock)) == 0)
        {
          mtx_set_owner (mtxp, self, mtxp->__flags);
          mtxp->__cnt = 1;
@@ -59,7 +59,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
 
     case PT_MTX_ERRORCHECK:
       self = _pthread_self ();
-      if ((ret = lll_trylock (&mtxp->__lock)) == 0)
+      if ((ret = lll_trylock (mtxp->__lock)) == 0)
        mtx_set_owner (mtxp, self, mtxp->__flags);
       else
        ret = EBUSY;
@@ -69,7 +69,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
     case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST:
     case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
       self = _pthread_self ();
-      ROBUST_LOCK (self, mtxp, __lll_robust_trylock);
+      ROBUST_LOCK (self, mtxp, lll_robust_trylock);
       break;
 
     default:
diff --git a/sysdeps/mach/hurd/htl/pt-mutex-unlock.c b/sysdeps/mach/hurd/htl/pt-mutex-unlock.c
index aabe9eafbb..f2e87a7c6f 100644
--- a/sysdeps/mach/hurd/htl/pt-mutex-unlock.c
+++ b/sysdeps/mach/hurd/htl/pt-mutex-unlock.c
@@ -32,7 +32,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
   switch (MTX_TYPE (mtxp))
     {
     case PT_MTX_NORMAL:
-      lll_unlock (&mtxp->__lock, flags);
+      lll_unlock (mtxp->__lock, flags);
       break;
 
     case PT_MTX_RECURSIVE:
@@ -42,7 +42,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
       else if (--mtxp->__cnt == 0)
        {
          mtxp->__owner_id = mtxp->__shpid = 0;
-         lll_unlock (&mtxp->__lock, flags);
+         lll_unlock (mtxp->__lock, flags);
        }
 
       break;
@@ -54,7 +54,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
       else
        {
          mtxp->__owner_id = mtxp->__shpid = 0;
-         lll_unlock (&mtxp->__lock, flags);
+         lll_unlock (mtxp->__lock, flags);
        }
 
       break;
@@ -74,7 +74,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
           * state, mark it as irrecoverable. */
          mtxp->__owner_id = ((mtxp->__lock & LLL_DEAD_OWNER)
                              ? NOTRECOVERABLE_ID : 0);
-         __lll_robust_unlock (&mtxp->__lock, flags);
+         lll_robust_unlock (mtxp->__lock, flags);
        }
 
       break;
diff --git a/sysdeps/mach/hurd/htl/pt-mutex.h b/sysdeps/mach/hurd/htl/pt-mutex.h
index 578478fcaf..ead7c91c4f 100644
--- a/sysdeps/mach/hurd/htl/pt-mutex.h
+++ b/sysdeps/mach/hurd/htl/pt-mutex.h
@@ -42,7 +42,7 @@
         return EDEADLK;   \
     }   \
   \
-  ret = cb (&mtxp->__lock, ##__VA_ARGS__);   \
+  ret = cb (mtxp->__lock, ##__VA_ARGS__);   \
   if (ret == 0 || ret == EOWNERDEAD)   \
     {   \
       if (mtxp->__owner_id == ENOTRECOVERABLE)   \
diff --git a/sysdeps/mach/hurd/setpgid.c b/sysdeps/mach/hurd/setpgid.c
index 4bb90c48c7..41562b77e5 100644
--- a/sysdeps/mach/hurd/setpgid.c
+++ b/sysdeps/mach/hurd/setpgid.c
@@ -39,7 +39,7 @@ __setpgid (pid_t pid, pid_t pgid)
     /* Synchronize with the signal thread to make sure we have
        received and processed proc_newids before returning to the user.  */
     while (_hurd_pids_changed_stamp == stamp)
-      lll_wait (&_hurd_pids_changed_stamp, stamp, 0);
+      lll_wait (_hurd_pids_changed_stamp, stamp, 0);
 
   return 0;
 
diff --git a/sysdeps/mach/hurd/setsid.c b/sysdeps/mach/hurd/setsid.c
index b297473a86..f5c95a334e 100644
--- a/sysdeps/mach/hurd/setsid.c
+++ b/sysdeps/mach/hurd/setsid.c
@@ -56,7 +56,7 @@ __setsid (void)
         returned by `getpgrp ()' in other threads) has been updated before
         we return.  */
       while (_hurd_pids_changed_stamp == stamp)
-        lll_wait (&_hurd_pids_changed_stamp, stamp, 0);
+        lll_wait (_hurd_pids_changed_stamp, stamp, 0);
     }
 
   HURD_CRITICAL_END;
diff --git a/sysdeps/mach/hurd/tls.h b/sysdeps/mach/hurd/tls.h
index a6a3586785..a0d70c2026 100644
--- a/sysdeps/mach/hurd/tls.h
+++ b/sysdeps/mach/hurd/tls.h
@@ -60,7 +60,7 @@
 #define THREAD_GSCOPE_RESET_FLAG() \
   do                                                                         \
     if (atomic_exchange_and_add_rel (&GL(dl_thread_gscope_count), -1) == 1)   \
-      lll_wake (&GL(dl_thread_gscope_count), 0);                             \
+      lll_wake (GL(dl_thread_gscope_count), 0);                              \
   while (0)
 #define THREAD_GSCOPE_WAIT() \
   do                                                                         \
@@ -68,7 +68,7 @@
       int count;                                                             \
       atomic_write_barrier ();                                               \
       while ((count = GL(dl_thread_gscope_count)))                           \
-        lll_wait (&GL(dl_thread_gscope_count), count, 0);                    \
+        lll_wait (GL(dl_thread_gscope_count), count, 0);                     \
     }                                                                        \
   while (0)
 
diff --git a/sysdeps/mach/libc-lock.h b/sysdeps/mach/libc-lock.h
index 3993a57b26..d9a2c42ebe 100644
--- a/sysdeps/mach/libc-lock.h
+++ b/sysdeps/mach/libc-lock.h
@@ -74,14 +74,14 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
 
 /* Lock the named lock variable.  */
 #define __libc_lock_lock(NAME)   \
-  ({ lll_lock (&(NAME), 0); 0; })
+  ({ lll_lock ((NAME), 0); 0; })
 
 /* Lock the named lock variable.  */
-#define __libc_lock_trylock(NAME) lll_trylock (&(NAME))
+#define __libc_lock_trylock(NAME) lll_trylock (NAME)
 
 /* Unlock the named lock variable.  */
 #define __libc_lock_unlock(NAME)   \
-  ({ lll_unlock (&(NAME), 0); 0; })
+  ({ lll_unlock ((NAME), 0); 0; })
 
 #define __libc_lock_define_recursive(CLASS,NAME) \
   CLASS __libc_lock_recursive_t NAME;
@@ -111,7 +111,7 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
      int __r = 0;   \
      if (__self == __lock->owner)   \
        ++__lock->cnt;   \
-     else if ((__r = lll_trylock (&__lock->lock)) == 0)   \
+     else if ((__r = lll_trylock (__lock->lock)) == 0)   \
        __lock->owner = __self, __lock->cnt = 1;   \
      __r;   \
    })
@@ -122,7 +122,7 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
      void *__self = __libc_lock_owner_self ();   \
      if (__self != __lock->owner)   \
        {   \
-         lll_lock (&__lock->lock, 0);   \
+         lll_lock (__lock->lock, 0);   \
          __lock->owner = __self;   \
        }   \
      ++__lock->cnt;   \
@@ -135,7 +135,7 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
      if (--__lock->cnt == 0)   \
        {   \
          __lock->owner = 0;   \
-         lll_unlock (&__lock->lock, 0);   \
+         lll_unlock (__lock->lock, 0);   \
        }   \
    })
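
The recursive variant above layers an owner pointer and a count on the
plain lll lock: only a thread's first acquisition takes the underlying
lock, and only the matching final release drops it.  A usage sketch
with the macros from this header (function names are hypothetical):

  __libc_lock_define_recursive (static, reclock)

  static void
  inner (void)
  {
    __libc_lock_lock_recursive (reclock);    /* same owner: just ++cnt */
    /* ... */
    __libc_lock_unlock_recursive (reclock);  /* cnt still > 0: lock kept */
  }

  static void
  outer (void)
  {
    __libc_lock_lock_recursive (reclock);    /* takes the lll lock */
    inner ();
    __libc_lock_unlock_recursive (reclock);  /* cnt hits 0: lll_unlock */
  }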
 
-- 
2.29.2



