author    | Ulrich Drepper <drepper@redhat.com> | 2007-08-11 18:50:51 +0000
committer | Ulrich Drepper <drepper@redhat.com> | 2007-08-11 18:50:51 +0000
commit    | 5bd8a24966df565ea992489eae95606bc522fe61 (patch)
tree      | 4293de9ec1fd34c448f41d24a33e5ce1eed60812
parent    | 4503061fdc688c94caaf9ad4d011b0ccf574b500 (diff)
download  | glibc-5bd8a24966df565ea992489eae95606bc522fe61.tar.gz
2007-08-11  Ulrich Drepper  <drepper@redhat.com>

* pthreadP.h (PTHREAD_ROBUST_MUTEX_PSHARED): Define.
* pthread_mutex_lock.c: Use it instead of PTHREAD_MUTEX_PSHARED when
dealing with robust mutexes.
* pthread_mutex_timedlock.c: Likewise.
* pthread_mutex_trylock.c: Likewise.
* pthread_mutex_unlock.c: Likewise.
* sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c: Likewise.
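The 2007-08-11 follow-up above exists because robust mutexes must never use the private futex namespace: when a thread exits, the kernel walks its robust list and wakes waiters with a plain FUTEX_WAKE, never with FUTEX_PRIVATE_FLAG, so a waiter sleeping on a "private" futex would miss that wakeup. The relevant macros, as this patch leaves them in nptl/pthreadP.h:

```c
#define PTHREAD_MUTEX_PSHARED_BIT 128

/* The low bits of __kind still hold the mutex type.  */
#define PTHREAD_MUTEX_TYPE(m) \
  ((m)->__data.__kind & 127)

#if LLL_PRIVATE == 0 && LLL_SHARED == 128
# define PTHREAD_MUTEX_PSHARED(m) \
  ((m)->__data.__kind & 128)
#else
# define PTHREAD_MUTEX_PSHARED(m) \
  (((m)->__data.__kind & 128) ? LLL_SHARED : LLL_PRIVATE)
#endif

/* The kernel when waking robust mutexes on exit never uses
   FUTEX_PRIVATE_FLAG FUTEX_WAKE.  */
#define PTHREAD_ROBUST_MUTEX_PSHARED(m) LLL_SHARED
```

The robust lock and unlock paths therefore pass PTHREAD_ROBUST_MUTEX_PSHARED (mutex) instead of PTHREAD_MUTEX_PSHARED (mutex), which is exactly the substitution the files listed in this entry perform.
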
2007-08-06  Jakub Jelinek  <jakub@redhat.com>

* pthreadP.h (PTHREAD_MUTEX_PSHARED_BIT): Define.
(PTHREAD_MUTEX_TYPE): Mask __kind with 127.
(PTHREAD_MUTEX_PSHARED): Define.
* pthread_mutex_init.c (__pthread_mutex_init): Set
PTHREAD_MUTEX_PSHARED_BIT for pshared or robust
mutexes.
* pthread_mutex_lock.c (LLL_MUTEX_LOCK): Take mutex as argument
instead of its __data.__lock field, pass PTHREAD_MUTEX_PSHARED
as second argument to lll_lock.
(LLL_MUTEX_TRYLOCK): Take mutex as argument
instead of its __data.__lock field.
(LLL_ROBUST_MUTEX_LOCK): Take mutex as argument instead of its
__data.__lock field, pass PTHREAD_MUTEX_PSHARED as second argument
to lll_robust_lock.
(__pthread_mutex_lock): Update LLL_MUTEX_LOCK, LLL_MUTEX_TRYLOCK,
LLL_ROBUST_MUTEX_LOCK users, use PTHREAD_MUTEX_TYPE (mutex)
instead of mutex->__data.__kind directly, pass
PTHREAD_MUTEX_PSHARED (mutex) to lll_unlock and lll_futex_wait.
* pthread_mutex_trylock.c (__pthread_mutex_trylock): Use
PTHREAD_MUTEX_TYPE (mutex) instead of mutex->__data.__kind
directly, pass PTHREAD_MUTEX_PSHARED (mutex) to lll_unlock.
* pthread_mutex_timedlock.c (pthread_mutex_timedlock): Pass
PTHREAD_MUTEX_PSHARED (mutex) to lll_timedlock,
lll_robust_timedlock, lll_unlock and lll_futex_timed_wait. Use
PTHREAD_MUTEX_TYPE (mutex) instead of mutex->__data.__kind directly.
* pthread_mutex_unlock.c (__pthread_mutex_unlock_usercnt): Pass
PTHREAD_MUTEX_PSHARED (mutex) to lll_unlock, lll_robust_unlock
and lll_futex_wake.
* pthread_mutex_setprioceiling.c (pthread_mutex_setprioceiling): Pass
PTHREAD_MUTEX_PSHARED (mutex) to lll_futex_wait and lll_futex_wake.
Use PTHREAD_MUTEX_TYPE (mutex) instead of mutex->__data.__kind
directly.
* sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c (LLL_MUTEX_LOCK):
Take mutex as argument instead of its __data.__lock field, pass
PTHREAD_MUTEX_PSHARED as second argument to lll_cond_lock.
(LLL_MUTEX_TRYLOCK): Take mutex as argument instead of its
__data.__lock field.
(LLL_ROBUST_MUTEX_LOCK): Take mutex as argument instead of its
__data.__lock field, pass PTHREAD_MUTEX_PSHARED as second argument
to lll_robust_cond_lock.
* pthread_cond_broadcast.c (__pthread_cond_broadcast): Add pshared
variable, pass it to lll_lock, lll_unlock, lll_futex_requeue and
lll_futex_wake. Don't use lll_futex_requeue if dependent mutex
has PTHREAD_MUTEX_PSHARED_BIT bit set in its __data.__kind.
* pthread_cond_destroy.c (__pthread_cond_destroy): Add pshared
variable, pass it to lll_lock, lll_unlock, lll_futex_wake and
lll_futex_wait.
* pthread_cond_signal.c (__pthread_cond_signal): Add pshared
variable, pass it to lll_lock, lll_unlock, lll_futex_wake_unlock and
lll_futex_wake.
* pthread_cond_timedwait.c (__pthread_cond_timedwait): Add
pshared variable, pass it to lll_lock, lll_unlock,
lll_futex_timed_wait and lll_futex_wake.
* pthread_cond_wait.c (__condvar_cleanup, __pthread_cond_wait): Add
pshared variable, pass it to lll_lock, lll_unlock, lll_futex_wait
and lll_futex_wake.
* sysdeps/unix/sysv/linux/alpha/lowlevellock.h (lll_futex_requeue,
lll_futex_wake_unlock): Add private argument, use __lll_private_flag
macro.
* sysdeps/unix/sysv/linux/ia64/lowlevellock.h (lll_futex_requeue,
lll_futex_wake_unlock): Likewise.
* sysdeps/unix/sysv/linux/powerpc/lowlevellock.h (lll_futex_requeue):
Likewise.
* sysdeps/unix/sysv/linux/sparc/lowlevellock.h (lll_futex_requeue,
lll_futex_wake_unlock): Likewise.
* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_futex_requeue):
Likewise.
* sysdeps/unix/sysv/linux/s390/lowlevellock.h (lll_futex_requeue,
lll_futex_wake_unlock): Likewise.
(lll_futex_wake): Fix a typo.
* sysdeps/unix/sysv/linux/pthread-pi-defines.sym (PS_BIT): Add.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
(__pthread_cond_broadcast): Pass LLL_PRIVATE to lll_* and or
FUTEX_PRIVATE_FLAG into SYS_futex op if cv is process private.
Don't use FUTEX_CMP_REQUEUE if dep_mutex is not process private.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
(__pthread_cond_signal): Pass LLL_PRIVATE to lll_* and or
FUTEX_PRIVATE_FLAG into SYS_futex op if cv is process private.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
(__pthread_cond_timedwait): Likewise.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
(__condvar_cleanup, __pthread_cond_wait): Likewise.
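Two short excerpts, condensed from this patch, show how the process-shared decision is wired up. pthread_mutex_init records it (together with robustness, which has to behave as shared per the 2007-08-11 follow-up above) in the mutex kind word; the condition-variable functions recognize a process-shared condvar by the ~0l sentinel stored in __data.__mutex and pass the resulting pshared value to every lll_/futex operation. pthread_cond_broadcast additionally skips FUTEX_CMP_REQUEUE when the dependent mutex has PTHREAD_MUTEX_PSHARED_BIT set, since the kernel can only requeue within a single futex namespace.

```c
/* nptl/pthread_mutex_init.c: record process-shared (and robust, which
   must behave as shared) in the kind word.  */
if ((imutexattr->mutexkind & (PTHREAD_MUTEXATTR_FLAG_PSHARED
                              | PTHREAD_MUTEXATTR_FLAG_ROBUST)) != 0)
  mutex->__data.__kind |= PTHREAD_MUTEX_PSHARED_BIT;

/* nptl/pthread_cond_signal.c (the same pattern appears in broadcast,
   destroy, wait and timedwait): pick the futex namespace once and use
   it for the internal condvar lock and the futex wake.  */
int pshared = (cond->__data.__mutex == (void *) ~0l)
              ? LLL_SHARED : LLL_PRIVATE;

lll_lock (cond->__data.__lock, pshared);
/* ... */
lll_futex_wake (&cond->__data.__futex, 1, pshared);
/* ... */
lll_unlock (cond->__data.__lock, pshared);
```
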
25 files changed, 421 insertions, 172 deletions
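All of the per-architecture lowlevellock.h entries make the same mechanical change: lll_futex_requeue and lll_futex_wake_unlock gain a private parameter and fold it into the futex operation with __lll_private_flag. On the INTERNAL_SYSCALL-based ports the requeue macro now reads (taken from the alpha version in this patch, lightly reflowed):

```c
/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
  ({                                                                     \
    INTERNAL_SYSCALL_DECL (__err);                                       \
    long int __ret;                                                      \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),                 \
                              __lll_private_flag (FUTEX_CMP_REQUEUE,     \
                                                  private),              \
                              (nr_wake), (nr_move), (mutex), (val));     \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err);                             \
  })
```

__lll_private_flag itself is not part of this patch; where the kernel's FUTEX_PRIVATE_FLAG support can be assumed, it boils down to something like the following sketch (an illustration, not the verbatim glibc helper), relying on LLL_PRIVATE == 0 and LLL_SHARED == FUTEX_PRIVATE_FLAG == 128, the same assumption the new PTHREAD_MUTEX_PSHARED fast path in pthreadP.h is guarded by:

```c
/* Sketch: OR the private flag into the futex opcode for process-private
   objects, leave it off for shared ones.  Assumes LLL_PRIVATE == 0 and
   LLL_SHARED == FUTEX_PRIVATE_FLAG (== 128), matching the
   "#if LLL_PRIVATE == 0 && LLL_SHARED == 128" fast path above.  */
#define __lll_private_flag(fl, private) \
  ((fl) | ((private) ^ FUTEX_PRIVATE_FLAG))
```
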
diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index 34fd724d9b..b81c5c5314 100644
diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h
index 82c0f1ecf6..17b6492ad8 100644
diff --git a/nptl/pthread_cond_broadcast.c b/nptl/pthread_cond_broadcast.c
index 5c0d76effc..22523c2973 100644
diff --git a/nptl/pthread_cond_destroy.c b/nptl/pthread_cond_destroy.c
index 53b5cd272f..35135a68bc 100644
diff --git a/nptl/pthread_cond_signal.c b/nptl/pthread_cond_signal.c
index f2de58fa1d..023bbb5e9b 100644
diff --git a/nptl/pthread_cond_timedwait.c b/nptl/pthread_cond_timedwait.c
index a8d95dc224..54839c015a 100644
diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c
index 679655f8fd..670fba5736 100644
diff --git a/nptl/pthread_mutex_init.c b/nptl/pthread_mutex_init.c
index 96f1fb00f8..a013ca83fe 100644
diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
index a82922e99a..ed98dfc6c3 100644
diff --git a/nptl/pthread_mutex_setprioceiling.c b/nptl/pthread_mutex_setprioceiling.c
index 301fb63d21..836c9a3e93 100644
diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
index 7a0ed57eaa..4bf0efea34 100644
diff --git a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c
index 9478d382c2..f6e24d4138 100644
diff --git a/nptl/pthread_mutex_unlock.c b/nptl/pthread_mutex_unlock.c
index 6226089ebe..d33d0593d8 100644
diff --git a/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h
index f3f291979a..ab829ad7af 100644
diff --git a/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h
index 3c28a397ea..fd30c43103 100644
diff --git a/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
index 41804d1372..87935c1a85 100644
diff --git a/nptl/sysdeps/unix/sysv/linux/pthread-pi-defines.sym b/nptl/sysdeps/unix/sysv/linux/pthread-pi-defines.sym
index a1b6794260..d985c6a79b 100644
diff --git a/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c b/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c
index 81ecd6556b..93841c5b3e 100644
diff --git a/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h
index ad4d27300f..d687e13c0b 100644
diff --git a/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
index 38692bbd2d..24cbbe413d 100644
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
index 192d203926..2cd69a14ce 100644
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
index 0c619bf271..6b8a29e768 100644
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
index 2fc9d1fad7..8be6d4a21b 100644
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
index 003069fb6b..415f06f467 100644
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
index 34ef2c7b77..db2683b87b 100644
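None of this changes the user-visible API; which futex namespace an object ends up in is still driven entirely by the standard pshared attributes. A minimal, illustrative program (not part of the patch) for context: with the default PTHREAD_PROCESS_PRIVATE attributes, mutexes and condvars can now be backed by FUTEX_PRIVATE_FLAG operations, while PTHREAD_PROCESS_SHARED and robust objects keep using the shared namespace.

```c
/* Illustrative only: the attribute below is what decides whether the
   NPTL implementation may use private futex operations after this
   patch.  */
#include <pthread.h>
#include <stdio.h>

int
main (void)
{
  pthread_mutexattr_t ma;
  pthread_condattr_t ca;
  pthread_mutex_t m;
  pthread_cond_t c;

  pthread_mutexattr_init (&ma);
  pthread_condattr_init (&ca);

  /* Process-private (the default): eligible for FUTEX_PRIVATE_FLAG.  */
  pthread_mutexattr_setpshared (&ma, PTHREAD_PROCESS_PRIVATE);
  pthread_condattr_setpshared (&ca, PTHREAD_PROCESS_PRIVATE);

  pthread_mutex_init (&m, &ma);
  pthread_cond_init (&c, &ca);

  pthread_mutex_lock (&m);
  pthread_cond_signal (&c);     /* no waiters: returns immediately */
  pthread_mutex_unlock (&m);

  pthread_cond_destroy (&c);
  pthread_mutex_destroy (&m);
  pthread_condattr_destroy (&ca);
  pthread_mutexattr_destroy (&ma);
  puts ("done");
  return 0;
}
```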