Diffstat (limited to 'nptl')
54 files changed, 202 insertions, 129 deletions
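Everything below is one mechanical substitution: `__builtin_expect (E, 0)' becomes `__glibc_unlikely (E)' and `__builtin_expect (E, 1)' becomes `__glibc_likely (E)'. For readers unfamiliar with the wrappers, this is roughly how they are defined (a sketch modeled on glibc's <sys/cdefs.h>; the exact compiler-version guards in the real header may differ):

    /* Sketch of the branch-hint wrappers, modeled on <sys/cdefs.h>.  */
    #if defined __GNUC__ && __GNUC__ >= 3
    # define __glibc_unlikely(cond) __builtin_expect ((cond), 0)
    # define __glibc_likely(cond)   __builtin_expect ((cond), 1)
    #else
    # define __glibc_unlikely(cond) (cond)
    # define __glibc_likely(cond)   (cond)
    #endif

The named macros also remove the easy-to-misread trailing 0/1 argument, which is the readability motivation behind this patch.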
diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index 474a473bf8..f7241c51e4 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,76 @@
+2014-02-10  Ondřej Bílka  <neleai@seznam.cz>
+
+	* allocatestack.c (queue_stack, allocate_stack,
+	__deallocate_stack, __reclaim_stacks): Use glibc_likely instead
+	__builtin_expect.
+	* cancellation.c (__pthread_enable_asynccancel,
+	__pthread_disable_asynccancel): Likewise.
+	* cleanup_defer.c (__pthread_register_cancel_defer,
+	__pthread_unregister_cancel_restore): Likewise.
+	* cleanup_defer_compat.c (_pthread_cleanup_push_defer,
+	_pthread_cleanup_pop_restore): Likewise.
+	* cond-perf.c (main): Likewise.
+	* nptl-init.c (sigcancel_handler, sighandler_setxid): Likewise.
+	* perf.c (get_clockfreq): Likewise.
+	* pthread_barrier_destroy.c (pthread_barrier_destroy): Likewise.
+	* pthread_barrier_init.c (pthread_barrier_init): Likewise.
+	* pthread_cond_timedwait.c (__pthread_cond_timedwait): Likewise.
+	* pthread_cond_wait.c (__pthread_cond_wait): Likewise.
+	* pthread_create.c (__free_tcb, start_thread, __pthread_create_2_1):
+	Likewise.
+	* pthread_getattr_np.c (pthread_getattr_np): Likewise.
+	* pthread_getspecific.c (__pthread_getspecific): Likewise.
+	* pthread_join.c (pthread_join): Likewise.
+	* pthread_key_delete.c (pthread_key_delete): Likewise.
+	* pthread_mutex_init.c (__pthread_mutex_init): Likewise.
+	* pthread_mutex_lock.c (__pthread_mutex_lock,
+	__pthread_mutex_lock_full): Likewise.
+	* pthread_mutex_timedlock.c (pthread_mutex_timedlock): Likewise.
+	* pthread_mutex_trylock.c (__pthread_mutex_trylock): Likewise.
+	* pthread_mutex_unlock.c (__pthread_mutex_unlock_usercnt): Likewise.
+	* pthread_rwlock_rdlock.c (__pthread_rwlock_rdlock): Likewise.
+	* pthread_rwlock_timedrdlock.c (pthread_rwlock_timedrdlock): Likewise.
+	* pthread_rwlock_timedwrlock.c (pthread_rwlock_timedwrlock): Likewise.
+	* pthread_rwlock_tryrdlock.c (__pthread_rwlock_tryrdlock): Likewise.
+	* pthread_setcancelstate.c (__pthread_setcancelstate): Likewise.
+	* pthread_setcanceltype.c (__pthread_setcanceltype): Likewise.
+	* pthread_setschedprio.c (pthread_setschedprio): Likewise.
+	* pthread_setspecific.c (__pthread_setspecific): Likewise.
+	* sem_init.c (__new_sem_init): Likewise.
+	* sem_open.c (__where_is_shmfs): Likewise.
+	* sigaction.c: Likewise.
+	* sockperf.c (get_clockfreq): Likewise.
+	* sysdeps/pthread/createthread.c (do_clone, create_thread): Likewise.
+	* sysdeps/pthread/setxid.h: Likewise.
+	* sysdeps/pthread/timer_create.c (timer_create): Likewise.
+	* sysdeps/pthread/unwind-forcedunwind.c (pthread_cancel_init,
+	__unwind_freeres, _Unwind_Resume, __gcc_personality_v0,
+	_Unwind_ForcedUnwind): Likewise.
+	* sysdeps/unix/sysv/linux/getpid.c (__getpid): Likewise.
+	* sysdeps/unix/sysv/linux/lowlevelrobustlock.c
+	(__lll_robust_lock_wait, __lll_robust_timedlock_wait): Likewise.
+	* sysdeps/unix/sysv/linux/mq_notify.c (mq_notify): Likewise.
+	* sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
+	* sysdeps/unix/sysv/linux/pthread_kill.c (__pthread_kill): Likewise.
+	* sysdeps/unix/sysv/linux/pthread_setaffinity.c
+	(__pthread_setaffinity_new): Likewise.
+	* sysdeps/unix/sysv/linux/pthread_sigqueue.c (pthread_sigqueue):
+	Likewise.
+	* sysdeps/unix/sysv/linux/pt-raise.c (raise): Likewise.
+	* sysdeps/unix/sysv/linux/raise.c (raise): Likewise.
+	* sysdeps/unix/sysv/linux/s390/lowlevellock.h (__lll_robust_trylock,
+	__lll_robust_lock, __lll_cond_lock, __lll_robust_timedlock): Likewise.
+	* sysdeps/unix/sysv/linux/sparc/lowlevellock.h (__lll_lock,
+	__lll_cond_lock, __lll_timedlock, __lll_robust_timedlock): Likewise.
+	* sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c
+	(pthread_barrier_destroy): Likewise.
+	* sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c
+	(pthread_barrier_init): Likewise.
+	* sysdeps/unix/sysv/linux/sparc/sem_init.c (__new_sem_init): Likewise.
+	* sysdeps/unix/sysv/linux/x86_64/timer_create.c (__timer_create_old):
+	Likewise.
+	* unwind.c (unwind_stop): Likewise.
+
 2014-02-08  Mike Frysinger  <vapier@gentoo.org>
 
 	* sem_open.c (__where_is_shmfs): Compare f.f_type to RAMFS_MAGIC too.
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 13eca47cf2..1e22f7da90 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -306,7 +306,7 @@ queue_stack (struct pthread *stack)
   stack_list_add (&stack->list, &stack_cache);
 
   stack_cache_actsize += stack->stackblock_size;
-  if (__builtin_expect (stack_cache_actsize > stack_cache_maxsize, 0))
+  if (__glibc_unlikely (stack_cache_actsize > stack_cache_maxsize))
     __free_stacks (stack_cache_maxsize);
 }
@@ -368,7 +368,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
     }
 
   /* Get memory for the stack.  */
-  if (__builtin_expect (attr->flags & ATTR_FLAG_STACKADDR, 0))
+  if (__glibc_unlikely (attr->flags & ATTR_FLAG_STACKADDR))
     {
       uintptr_t adj;
@@ -504,7 +504,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
       mem = mmap (NULL, size, prot,
 		  MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
 
-      if (__builtin_expect (mem == MAP_FAILED, 0))
+      if (__glibc_unlikely (mem == MAP_FAILED))
 	return errno;
 
       /* SIZE is guaranteed to be greater than zero.
@@ -525,7 +525,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 	  /* Make sure the coloring offsets does not disturb the alignment
 	     of the TCB and static TLS block.  */
-	  if (__builtin_expect ((coloring & __static_tls_align_m1) != 0, 0))
+	  if (__glibc_unlikely ((coloring & __static_tls_align_m1) != 0))
 	    coloring = (((coloring + __static_tls_align_m1)
 			 & ~(__static_tls_align_m1))
 			& ~pagesize_m1);
@@ -629,7 +629,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
     }
 
   /* Create or resize the guard area if necessary.  */
-  if (__builtin_expect (guardsize > pd->guardsize, 0))
+  if (__glibc_unlikely (guardsize > pd->guardsize))
     {
 #ifdef NEED_SEPARATE_REGISTER_STACK
       char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
@@ -752,7 +752,7 @@ __deallocate_stack (struct pthread *pd)
      not reset the 'used' flag in the 'tid' field.  This is done by
      the kernel.  If no thread has been created yet this field is
      still zero.  */
-  if (__builtin_expect (! pd->user_stack, 1))
+  if (__glibc_likely (! pd->user_stack))
     (void) queue_stack (pd);
   else
     /* Free the memory associated with the ELF TLS.  */
@@ -916,7 +916,7 @@ __reclaim_stacks (void)
   INIT_LIST_HEAD (&stack_used);
   INIT_LIST_HEAD (&__stack_user);
 
-  if (__builtin_expect (THREAD_GETMEM (self, user_stack), 0))
+  if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
     list_add (&self->list, &__stack_user);
   else
     list_add (&self->list, &stack_used);
diff --git a/nptl/cancellation.c b/nptl/cancellation.c
index bddea1f3fa..aaf102dd0f 100644
--- a/nptl/cancellation.c
+++ b/nptl/cancellation.c
@@ -40,7 +40,7 @@ __pthread_enable_asynccancel (void)
       int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
 					      oldval);
-      if (__builtin_expect (curval == oldval, 1))
+      if (__glibc_likely (curval == oldval))
 	{
 	  if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
 	    {
@@ -79,7 +79,7 @@ __pthread_disable_asynccancel (int oldtype)
       int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
 					      oldval);
-      if (__builtin_expect (curval == oldval, 1))
+      if (__glibc_likely (curval == oldval))
 	break;
 
       /* Prepare the next round.  */
diff --git a/nptl/cleanup_defer.c b/nptl/cleanup_defer.c
index 4049497155..a8fc40304e 100644
--- a/nptl/cleanup_defer.c
+++ b/nptl/cleanup_defer.c
@@ -34,14 +34,14 @@ __pthread_register_cancel_defer (__pthread_unwind_buf_t *buf)
   int cancelhandling = THREAD_GETMEM (self, cancelhandling);
 
   /* Disable asynchronous cancellation for now.  */
-  if (__builtin_expect (cancelhandling & CANCELTYPE_BITMASK, 0))
+  if (__glibc_unlikely (cancelhandling & CANCELTYPE_BITMASK))
     while (1)
       {
 	int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
 						cancelhandling
 						& ~CANCELTYPE_BITMASK,
 						cancelhandling);
-	if (__builtin_expect (curval == cancelhandling, 1))
+	if (__glibc_likely (curval == cancelhandling))
 	  /* Successfully replaced the value.  */
 	  break;
@@ -78,7 +78,7 @@ __pthread_unregister_cancel_restore (__pthread_unwind_buf_t *buf)
 						cancelhandling
 						| CANCELTYPE_BITMASK,
 						cancelhandling);
-	if (__builtin_expect (curval == cancelhandling, 1))
+	if (__glibc_likely (curval == cancelhandling))
 	  /* Successfully replaced the value.  */
 	  break;
diff --git a/nptl/cleanup_defer_compat.c b/nptl/cleanup_defer_compat.c
index b57fd4e285..9c52f5fc53 100644
--- a/nptl/cleanup_defer_compat.c
+++ b/nptl/cleanup_defer_compat.c
@@ -34,14 +34,14 @@ _pthread_cleanup_push_defer (buffer, routine, arg)
   int cancelhandling = THREAD_GETMEM (self, cancelhandling);
 
   /* Disable asynchronous cancellation for now.  */
-  if (__builtin_expect (cancelhandling & CANCELTYPE_BITMASK, 0))
+  if (__glibc_unlikely (cancelhandling & CANCELTYPE_BITMASK))
     while (1)
      {
 	int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
 						cancelhandling
 						& ~CANCELTYPE_BITMASK,
 						cancelhandling);
-	if (__builtin_expect (curval == cancelhandling, 1))
+	if (__glibc_likely (curval == cancelhandling))
 	  /* Successfully replaced the value.  */
 	  break;
@@ -78,7 +78,7 @@ _pthread_cleanup_pop_restore (buffer, execute)
 						cancelhandling
 						| CANCELTYPE_BITMASK,
 						cancelhandling);
-	if (__builtin_expect (curval == cancelhandling, 1))
+	if (__glibc_likely (curval == cancelhandling))
 	  /* Successfully replaced the value.  */
 	  break;
      }
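The four files above share one shape: a compare-and-swap retry loop whose CAS is expected to succeed on the first pass, so success is annotated likely and the retry path stays cold. The same shape in portable C11 atomics (illustrative only; NPTL uses its own THREAD_ATOMIC_CMPXCHG_VAL primitive, not <stdatomic.h>):

    #include <stdatomic.h>

    #ifndef __glibc_likely
    # define __glibc_likely(cond) __builtin_expect ((cond), 1)
    #endif

    /* Set MASK bits in *WORD, retrying on CAS failure.  The CAS is the
       expected fast path; contention sends us around the loop again.  */
    static void
    set_bits (atomic_int *word, int mask)
    {
      int oldval = atomic_load (word);
      while (1)
        {
          if (__glibc_likely (atomic_compare_exchange_weak (word, &oldval,
                                                            oldval | mask)))
            break;        /* Succeeded on the (expected) first attempt.  */
          /* oldval was refreshed by the failed CAS; try again.  */
        }
    }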
*/ break; diff --git a/nptl/cond-perf.c b/nptl/cond-perf.c index e37914e6b7..9c9488e274 100644 --- a/nptl/cond-perf.c +++ b/nptl/cond-perf.c @@ -70,7 +70,7 @@ main (int argc, char *argv[]) pthread_t th[nthreads]; int i; for (i = 0; __builtin_expect (i < nthreads, 1); ++i) - if (__builtin_expect ((err = pthread_create (&th[i], NULL, cons, (void *) (long) i)) != 0, 0)) + if (__glibc_unlikely ((err = pthread_create (&th[i], NULL, cons, (void *) (long) i)) != 0)) printf ("pthread_create: %s\n", strerror (err)); for (i = 0; __builtin_expect (i < nrounds, 1); ++i) diff --git a/nptl/nptl-init.c b/nptl/nptl-init.c index 4d1f7d8721..794156ba8e 100644 --- a/nptl/nptl-init.c +++ b/nptl/nptl-init.c @@ -175,7 +175,7 @@ sigcancel_handler (int sig, siginfo_t *si, void *ctx) /* Determine the process ID. It might be negative if the thread is in the middle of a fork() call. */ pid_t pid = THREAD_GETMEM (THREAD_SELF, pid); - if (__builtin_expect (pid < 0, 0)) + if (__glibc_unlikely (pid < 0)) pid = -pid; /* Safety check. It would be possible to call this function for @@ -232,7 +232,7 @@ sighandler_setxid (int sig, siginfo_t *si, void *ctx) /* Determine the process ID. It might be negative if the thread is in the middle of a fork() call. */ pid_t pid = THREAD_GETMEM (THREAD_SELF, pid); - if (__builtin_expect (pid < 0, 0)) + if (__glibc_unlikely (pid < 0)) pid = -pid; /* Safety check. It would be possible to call this function for diff --git a/nptl/perf.c b/nptl/perf.c index ceb30c6bcc..3ecab2ca56 100644 --- a/nptl/perf.c +++ b/nptl/perf.c @@ -633,7 +633,7 @@ get_clockfreq (void) return result; fd = open ("/proc/cpuinfo", O_RDONLY); - if (__builtin_expect (fd != -1, 1)) + if (__glibc_likely (fd != -1)) { /* XXX AFAIK the /proc filesystem can generate "files" only up to a size of 4096 bytes. */ @@ -645,7 +645,7 @@ get_clockfreq (void) { char *mhz = memmem (buf, n, "cpu MHz", 7); - if (__builtin_expect (mhz != NULL, 1)) + if (__glibc_likely (mhz != NULL)) { char *endp = buf + n; int seen_decpoint = 0; diff --git a/nptl/pthread_barrier_destroy.c b/nptl/pthread_barrier_destroy.c index 60fe2dfbff..c59a861221 100644 --- a/nptl/pthread_barrier_destroy.c +++ b/nptl/pthread_barrier_destroy.c @@ -32,7 +32,7 @@ pthread_barrier_destroy (barrier) lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG); - if (__builtin_expect (ibarrier->left == ibarrier->init_count, 1)) + if (__glibc_likely (ibarrier->left == ibarrier->init_count)) /* The barrier is not used anymore. */ result = 0; else diff --git a/nptl/pthread_barrier_init.c b/nptl/pthread_barrier_init.c index 0e603ba59e..ac180a9a5c 100644 --- a/nptl/pthread_barrier_init.c +++ b/nptl/pthread_barrier_init.c @@ -36,7 +36,7 @@ pthread_barrier_init (barrier, attr, count) { struct pthread_barrier *ibarrier; - if (__builtin_expect (count == 0, 0)) + if (__glibc_unlikely (count == 0)) return EINVAL; const struct pthread_barrierattr *iattr diff --git a/nptl/pthread_cond_timedwait.c b/nptl/pthread_cond_timedwait.c index b975b8f126..1698085361 100644 --- a/nptl/pthread_cond_timedwait.c +++ b/nptl/pthread_cond_timedwait.c @@ -87,7 +87,7 @@ __pthread_cond_timedwait (cond, mutex, abstime) /* Work around the fact that the kernel rejects negative timeout values despite them being valid. */ - if (__builtin_expect (abstime->tv_sec < 0, 0)) + if (__glibc_unlikely (abstime->tv_sec < 0)) goto timeout; /* Remember the mutex we are using here. If there is already a @@ -143,7 +143,7 @@ __pthread_cond_timedwait (cond, mutex, abstime) --rt.tv_sec; } /* Did we already time out? 
-	if (__builtin_expect (rt.tv_sec < 0, 0))
+	if (__glibc_unlikely (rt.tv_sec < 0))
 	  {
 	    if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
 	      goto bc_out;
@@ -217,7 +217,7 @@ __pthread_cond_timedwait (cond, mutex, abstime)
 	    break;
 
 	  /* Not woken yet.  Maybe the time expired?  */
-	  if (__builtin_expect (err == -ETIMEDOUT, 0))
+	  if (__glibc_unlikely (err == -ETIMEDOUT))
 	    {
 	    timeout:
 	      /* Yep.  Adjust the counters.  */
diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c
index 6222d922ff..9d2dfceec5 100644
--- a/nptl/pthread_cond_wait.c
+++ b/nptl/pthread_cond_wait.c
@@ -119,7 +119,7 @@ __pthread_cond_wait (cond, mutex)
 
   /* Now we can release the mutex.  */
   err = __pthread_mutex_unlock_usercnt (mutex, 0);
-  if (__builtin_expect (err, 0))
+  if (__glibc_unlikely (err))
     {
       lll_unlock (cond->__data.__lock, pshared);
       return err;
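The __pthread_cond_timedwait hunks normalize an absolute deadline into a relative timeout before sleeping; a negative seconds field after normalization means the deadline already passed, which is exactly the branch now marked unlikely. A self-contained sketch of that computation (hypothetical helper name):

    #include <time.h>

    /* Convert an absolute CLOCK_REALTIME deadline into a relative
       timeout.  Returns -1 if the deadline has already passed.  */
    static int
    relative_timeout (const struct timespec *abstime, struct timespec *rt)
    {
      struct timespec now;
      clock_gettime (CLOCK_REALTIME, &now);
      rt->tv_sec = abstime->tv_sec - now.tv_sec;
      rt->tv_nsec = abstime->tv_nsec - now.tv_nsec;
      if (rt->tv_nsec < 0)
        {
          rt->tv_nsec += 1000000000;  /* Borrow one second.  */
          --rt->tv_sec;
        }
      return rt->tv_sec < 0 ? -1 : 0; /* -1: already timed out.  */
    }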
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index 9d7f52f57e..cee0806d52 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -211,7 +211,7 @@ __free_tcb (struct pthread *pd)
       abort ();
 
   /* Free TPP data.  */
-  if (__builtin_expect (pd->tpp != NULL, 0))
+  if (__glibc_unlikely (pd->tpp != NULL))
     {
       struct priority_protection_data *tpp = pd->tpp;
@@ -246,7 +246,7 @@ start_thread (void *arg)
   __ctype_init ();
 
   /* Allow setxid from now onwards.  */
-  if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0) == -2, 0))
+  if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
     lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);
 
 #ifdef __NR_set_robust_list
@@ -265,7 +265,7 @@ start_thread (void *arg)
   /* If the parent was running cancellation handlers while creating
      the thread the new thread inherited the signal mask.  Reset the
      cancellation signal mask.  */
-  if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
+  if (__glibc_unlikely (pd->parent_cancelhandling & CANCELING_BITMASK))
     {
       INTERNAL_SYSCALL_DECL (err);
       sigset_t mask;
@@ -285,12 +285,12 @@ start_thread (void *arg)
   int not_first_call;
   not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
-  if (__builtin_expect (! not_first_call, 1))
+  if (__glibc_likely (! not_first_call))
     {
       /* Store the new cleanup handler info.  */
       THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);
 
-      if (__builtin_expect (pd->stopped_start, 0))
+      if (__glibc_unlikely (pd->stopped_start))
 	{
 	  int oldtype = CANCEL_ASYNC ();
@@ -327,12 +327,12 @@ start_thread (void *arg)
   /* If this is the last thread we terminate the process now.  We
      do not notify the debugger, it might just irritate it if there
      is no thread left.  */
-  if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
+  if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
     /* This was the last thread.  */
     exit (0);
 
   /* Report the death of the thread if this is wanted.  */
-  if (__builtin_expect (pd->report_events, 0))
+  if (__glibc_unlikely (pd->report_events))
     {
       /* See whether TD_DEATH is in any of the mask.  */
       const int idx = __td_eventword (TD_DEATH);
@@ -412,7 +412,7 @@ start_thread (void *arg)
   if (IS_DETACHED (pd))
     /* Free the TCB.  */
     __free_tcb (pd);
-  else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
+  else if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
     {
       /* Some other thread might call any of the setXid functions and
	 expect us to reply.  In this case wait until we did that.  */
@@ -482,7 +482,7 @@ __pthread_create_2_1 (newthread, attr, start_routine, arg)
   int err = ALLOCATE_STACK (iattr, &pd);
   int retval = 0;
 
-  if (__builtin_expect (err != 0, 0))
+  if (__glibc_unlikely (err != 0))
     /* Something went wrong.  Maybe a parameter of the attributes is
        invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
diff --git a/nptl/pthread_getattr_np.c b/nptl/pthread_getattr_np.c
index e79f282fee..21110c36b0 100644
--- a/nptl/pthread_getattr_np.c
+++ b/nptl/pthread_getattr_np.c
@@ -57,7 +57,7 @@ pthread_getattr_np (thread_id, attr)
   iattr->guardsize = thread->reported_guardsize;
 
   /* The sizes are subject to alignment.  */
-  if (__builtin_expect (thread->stackblock != NULL, 1))
+  if (__glibc_likely (thread->stackblock != NULL))
     {
       iattr->stacksize = thread->stackblock_size;
       iattr->stackaddr = (char *) thread->stackblock + iattr->stacksize;
diff --git a/nptl/pthread_getspecific.c b/nptl/pthread_getspecific.c
index e0e7daca24..e0cc1993ad 100644
--- a/nptl/pthread_getspecific.c
+++ b/nptl/pthread_getspecific.c
@@ -28,7 +28,7 @@ __pthread_getspecific (key)
 
   /* Special case access to the first 2nd-level block.  This is the
      usual case.  */
-  if (__builtin_expect (key < PTHREAD_KEY_2NDLEVEL_SIZE, 1))
+  if (__glibc_likely (key < PTHREAD_KEY_2NDLEVEL_SIZE))
     data = &THREAD_SELF->specific_1stblock[key];
   else
     {
@@ -58,7 +58,7 @@ __pthread_getspecific (key)
     {
       uintptr_t seq = data->seq;
 
-      if (__builtin_expect (seq != __pthread_keys[key].seq, 0))
+      if (__glibc_unlikely (seq != __pthread_keys[key].seq))
 	result = data->data = NULL;
     }
diff --git a/nptl/pthread_join.c b/nptl/pthread_join.c
index 69f844a860..5a43182d8a 100644
--- a/nptl/pthread_join.c
+++ b/nptl/pthread_join.c
@@ -99,7 +99,7 @@ pthread_join (threadid, thread_return)
 
   pthread_cleanup_pop (0);
 
-  if (__builtin_expect (result == 0, 1))
+  if (__glibc_likely (result == 0))
     {
       /* We mark the thread as terminated and as joined.  */
       pd->tid = -1;
diff --git a/nptl/pthread_key_delete.c b/nptl/pthread_key_delete.c
index a79a0ea52a..d2981b8af1 100644
--- a/nptl/pthread_key_delete.c
+++ b/nptl/pthread_key_delete.c
@@ -27,7 +27,7 @@ pthread_key_delete (key)
 {
   int result = EINVAL;
 
-  if (__builtin_expect (key < PTHREAD_KEYS_MAX, 1))
+  if (__glibc_likely (key < PTHREAD_KEYS_MAX))
     {
       unsigned int seq = __pthread_keys[key].seq;
diff --git a/nptl/pthread_mutex_init.c b/nptl/pthread_mutex_init.c
index d67dcdb5e2..2b3468835c 100644
--- a/nptl/pthread_mutex_init.c
+++ b/nptl/pthread_mutex_init.c
@@ -59,7 +59,7 @@ __pthread_mutex_init (mutex, mutexattr)
 
     case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
 #ifndef __ASSUME_FUTEX_LOCK_PI
-      if (__builtin_expect (tpi_supported == 0, 0))
+      if (__glibc_unlikely (tpi_supported == 0))
 	{
 	  int lock = 0;
 	  INTERNAL_SYSCALL_DECL (err);
@@ -68,7 +68,7 @@ __pthread_mutex_init (mutex, mutexattr)
 	  assert (INTERNAL_SYSCALL_ERROR_P (ret, err));
 	  tpi_supported = INTERNAL_SYSCALL_ERRNO (ret, err) == ENOSYS ? -1 : 1;
 	}
-      if (__builtin_expect (tpi_supported < 0, 0))
+      if (__glibc_unlikely (tpi_supported < 0))
 	return ENOTSUP;
 #endif
       break;
diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
index add76e8a23..2c317770f9 100644
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -71,7 +71,7 @@ __pthread_mutex_lock (mutex)
 					     | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
     return __pthread_mutex_lock_full (mutex);
 
-  if (__builtin_expect (type == PTHREAD_MUTEX_TIMED_NP, 1))
+  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
     {
       FORCE_ELISION (mutex, goto elision);
     simple:
@@ -80,7 +80,7 @@ __pthread_mutex_lock (mutex)
       assert (mutex->__data.__owner == 0);
     }
 #ifdef HAVE_ELISION
-  else if (__builtin_expect (type == PTHREAD_MUTEX_TIMED_ELISION_NP, 1))
+  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
     {
 elision: __attribute__((unused))
       /* This case can never happen on a system without elision,
@@ -101,7 +101,7 @@ __pthread_mutex_lock (mutex)
       if (mutex->__data.__owner == id)
 	{
 	  /* Just bump the counter.  */
-	  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+	  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
 	    /* Overflow of the counter.  */
 	    return EAGAIN;
@@ -150,7 +150,7 @@ __pthread_mutex_lock (mutex)
       pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
       assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
       /* Check whether we already hold the mutex.  */
-      if (__builtin_expect (mutex->__data.__owner == id, 0))
+      if (__glibc_unlikely (mutex->__data.__owner == id))
 	return EDEADLK;
       goto simple;
     }
@@ -229,7 +229,7 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 	    }
 
 	  /* Check whether we already hold the mutex.  */
-	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+	  if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
 	    {
 	      int kind = PTHREAD_MUTEX_TYPE (mutex);
 	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
@@ -245,7 +245,7 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 				 NULL);
 
 		  /* Just bump the counter.  */
-		  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+		  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
 		    /* Overflow of the counter.  */
 		    return EAGAIN;
@@ -296,7 +296,7 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 	  oldval = mutex->__data.__lock;
 
 	  /* Check whether we already hold the mutex.  */
-	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+	  if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
 	    {
 	      if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
 		{
@@ -309,7 +309,7 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
 	      /* Just bump the counter.  */
-	      if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+	      if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
 		/* Overflow of the counter.  */
 		return EAGAIN;
@@ -359,7 +359,7 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 	      assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
 	    }
 
-	  if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+	  if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
 	    {
 	      atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
@@ -427,7 +427,7 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 	  if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
 	    {
 	      /* Just bump the counter.  */
-	      if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+	      if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
 		/* Overflow of the counter.  */
 		return EAGAIN;
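The recursive-mutex hunks here (and in the timedlock/trylock files below) all guard the same fast path: re-acquisition by the current owner only bumps a counter, and the single unlikely failure is counter wraparound. A toy version under assumed field names (the real struct lives in glibc's internal headers):

    #include <errno.h>

    #ifndef __glibc_unlikely
    # define __glibc_unlikely(cond) __builtin_expect ((cond), 0)
    #endif

    /* Minimal stand-in for the NPTL mutex fields used in these hunks.  */
    struct toy_mutex { int owner; unsigned int count; };

    /* Recursive re-lock fast path: owner bumps the counter; only
       wraparound fails, and that branch is marked unlikely.  */
    static int
    relock_recursive (struct toy_mutex *m, int id)
    {
      if (m->owner == id)
        {
          if (__glibc_unlikely (m->count + 1 == 0))
            return EAGAIN;      /* Counter would overflow.  */
          ++m->count;
          return 0;
        }
      return -1;                /* Not owned by us: take the slow path.  */
    }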
diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
index 8e7a52b54f..03619230ff 100644
--- a/nptl/pthread_mutex_timedlock.c
+++ b/nptl/pthread_mutex_timedlock.c
@@ -61,7 +61,7 @@ pthread_mutex_timedlock (mutex, abstime)
       if (mutex->__data.__owner == id)
 	{
 	  /* Just bump the counter.  */
-	  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+	  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
 	    /* Overflow of the counter.  */
 	    return EAGAIN;
@@ -84,7 +84,7 @@ pthread_mutex_timedlock (mutex, abstime)
       /* Error checking mutex.  */
     case PTHREAD_MUTEX_ERRORCHECK_NP:
       /* Check whether we already hold the mutex.  */
-      if (__builtin_expect (mutex->__data.__owner == id, 0))
+      if (__glibc_unlikely (mutex->__data.__owner == id))
 	return EDEADLK;
 
       /* FALLTHROUGH */
@@ -175,7 +175,7 @@ pthread_mutex_timedlock (mutex, abstime)
 	    }
 
 	  /* Check whether we already hold the mutex.  */
-	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+	  if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
 	    {
 	      int kind = PTHREAD_MUTEX_TYPE (mutex);
 	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
@@ -191,7 +191,7 @@ pthread_mutex_timedlock (mutex, abstime)
 				 NULL);
 
 		  /* Just bump the counter.  */
-		  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+		  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
 		    /* Overflow of the counter.  */
 		    return EAGAIN;
@@ -250,7 +250,7 @@ pthread_mutex_timedlock (mutex, abstime)
 	  oldval = mutex->__data.__lock;
 
 	  /* Check whether we already hold the mutex.  */
-	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+	  if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
 	    {
 	      if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
 		{
@@ -263,7 +263,7 @@ pthread_mutex_timedlock (mutex, abstime)
 	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
 	      /* Just bump the counter.  */
-	      if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+	      if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
 		/* Overflow of the counter.  */
 		return EAGAIN;
@@ -337,7 +337,7 @@ pthread_mutex_timedlock (mutex, abstime)
 	      assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
 	    }
 
-	  if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+	  if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
 	    {
 	      atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
@@ -400,7 +400,7 @@ pthread_mutex_timedlock (mutex, abstime)
 	  if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
 	    {
 	      /* Just bump the counter.  */
-	      if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+	      if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
 		/* Overflow of the counter.  */
 		return EAGAIN;
diff --git a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c
index 4d5f75d24f..e46f22ca2c 100644
--- a/nptl/pthread_mutex_trylock.c
+++ b/nptl/pthread_mutex_trylock.c
@@ -50,7 +50,7 @@ __pthread_mutex_trylock (mutex)
       if (mutex->__data.__owner == id)
 	{
 	  /* Just bump the counter.  */
-	  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+	  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
 	    /* Overflow of the counter.  */
 	    return EAGAIN;
@@ -133,7 +133,7 @@ __pthread_mutex_trylock (mutex)
 	}
 
       /* Check whether we already hold the mutex.  */
-      if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+      if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
 	{
 	  int kind = PTHREAD_MUTEX_TYPE (mutex);
 	  if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
@@ -149,7 +149,7 @@ __pthread_mutex_trylock (mutex)
 			     NULL);
 
 	      /* Just bump the counter.  */
-	      if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+	      if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
 		/* Overflow of the counter.  */
 		return EAGAIN;
@@ -211,7 +211,7 @@ __pthread_mutex_trylock (mutex)
 	  oldval = mutex->__data.__lock;
 
 	  /* Check whether we already hold the mutex.  */
-	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+	  if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
 	    {
 	      if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
 		{
@@ -224,7 +224,7 @@ __pthread_mutex_trylock (mutex)
 	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
 	      /* Just bump the counter.  */
-	      if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+	      if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
 		/* Overflow of the counter.  */
 		return EAGAIN;
@@ -270,7 +270,7 @@ __pthread_mutex_trylock (mutex)
 	      oldval = mutex->__data.__lock;
 	    }
 
-	  if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+	  if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
 	    {
 	      atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
@@ -337,7 +337,7 @@ __pthread_mutex_trylock (mutex)
 	  if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
 	    {
 	      /* Just bump the counter.  */
-	      if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+	      if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
 		/* Overflow of the counter.  */
 		return EAGAIN;
diff --git a/nptl/pthread_mutex_unlock.c b/nptl/pthread_mutex_unlock.c
index 8fe5b67f09..95ae9337c8 100644
--- a/nptl/pthread_mutex_unlock.c
+++ b/nptl/pthread_mutex_unlock.c
@@ -60,7 +60,7 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
 
       return 0;
     }
-  else if (__builtin_expect (type == PTHREAD_MUTEX_TIMED_ELISION_NP, 1))
+  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
     {
       /* Don't reset the owner/users fields for elision.  */
      return lll_unlock_elision (mutex->__data.__lock,
diff --git a/nptl/pthread_rwlock_rdlock.c b/nptl/pthread_rwlock_rdlock.c
index a06c3f2d8b..3773f7db2a 100644
--- a/nptl/pthread_rwlock_rdlock.c
+++ b/nptl/pthread_rwlock_rdlock.c
@@ -45,7 +45,7 @@ __pthread_rwlock_rdlock (rwlock)
 	      || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
 	{
 	  /* Increment the reader counter.  Avoid overflow.  */
-	  if (__builtin_expect (++rwlock->__data.__nr_readers == 0, 0))
+	  if (__glibc_unlikely (++rwlock->__data.__nr_readers == 0))
 	    {
 	      /* Overflow on number of readers.	 */
 	      --rwlock->__data.__nr_readers;
@@ -67,7 +67,7 @@ __pthread_rwlock_rdlock (rwlock)
 	}
 
       /* Remember that we are a reader.  */
-      if (__builtin_expect (++rwlock->__data.__nr_readers_queued == 0, 0))
+      if (__glibc_unlikely (++rwlock->__data.__nr_readers_queued == 0))
 	{
 	  /* Overflow on number of queued readers.  */
 	  --rwlock->__data.__nr_readers_queued;
diff --git a/nptl/pthread_rwlock_timedrdlock.c b/nptl/pthread_rwlock_timedrdlock.c
index 770cc343cf..81b2ee4096 100644
--- a/nptl/pthread_rwlock_timedrdlock.c
+++ b/nptl/pthread_rwlock_timedrdlock.c
@@ -78,7 +78,7 @@ pthread_rwlock_timedrdlock (rwlock, abstime)
 
       /* Work around the fact that the kernel rejects negative timeout values
	 despite them being valid.  */
-      if (__builtin_expect (abstime->tv_sec < 0, 0))
+      if (__glibc_unlikely (abstime->tv_sec < 0))
 	{
 	  result = ETIMEDOUT;
 	  break;
diff --git a/nptl/pthread_rwlock_timedwrlock.c b/nptl/pthread_rwlock_timedwrlock.c
index 2e1390b34f..26f2139c9f 100644
--- a/nptl/pthread_rwlock_timedwrlock.c
+++ b/nptl/pthread_rwlock_timedwrlock.c
@@ -69,7 +69,7 @@ pthread_rwlock_timedwrlock (rwlock, abstime)
 
      /* Work around the fact that the kernel rejects negative timeout values
	 despite them being valid.  */
-      if (__builtin_expect (abstime->tv_sec < 0, 0))
+      if (__glibc_unlikely (abstime->tv_sec < 0))
 	{
 	  result = ETIMEDOUT;
 	  break;
diff --git a/nptl/pthread_rwlock_tryrdlock.c b/nptl/pthread_rwlock_tryrdlock.c
index 697aa80c70..f7b1e6b8ac 100644
--- a/nptl/pthread_rwlock_tryrdlock.c
+++ b/nptl/pthread_rwlock_tryrdlock.c
@@ -32,7 +32,7 @@ __pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
       && (rwlock->__data.__nr_writers_queued == 0
 	  || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
     {
-      if (__builtin_expect (++rwlock->__data.__nr_readers == 0, 0))
+      if (__glibc_unlikely (++rwlock->__data.__nr_readers == 0))
 	{
 	  --rwlock->__data.__nr_readers;
 	  result = EAGAIN;
diff --git a/nptl/pthread_setcancelstate.c b/nptl/pthread_setcancelstate.c
index 3bb05030ed..5c3ca866bf 100644
--- a/nptl/pthread_setcancelstate.c
+++ b/nptl/pthread_setcancelstate.c
@@ -55,7 +55,7 @@ __pthread_setcancelstate (state, oldstate)
	 atomically since other bits could be modified as well.  */
       int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
 					      oldval);
-      if (__builtin_expect (curval == oldval, 1))
+      if (__glibc_likely (curval == oldval))
 	{
 	  if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
 	    __do_cancel ();
diff --git a/nptl/pthread_setcanceltype.c b/nptl/pthread_setcanceltype.c
index 0a7691f5b6..fb1631f0ab 100644
--- a/nptl/pthread_setcanceltype.c
+++ b/nptl/pthread_setcanceltype.c
@@ -55,7 +55,7 @@ __pthread_setcanceltype (type, oldtype)
	 atomically since other bits could be modified as well.  */
       int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
 					      oldval);
-      if (__builtin_expect (curval == oldval, 1))
+      if (__glibc_likely (curval == oldval))
 	{
 	  if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
 	    {
diff --git a/nptl/pthread_setschedprio.c b/nptl/pthread_setschedprio.c
index 033bfecf62..c58ba94719 100644
--- a/nptl/pthread_setschedprio.c
+++ b/nptl/pthread_setschedprio.c
@@ -48,7 +48,7 @@ pthread_setschedprio (threadid, prio)
     param.sched_priority = pd->tpp->priomax;
 
   /* Try to set the scheduler information.  */
-  if (__builtin_expect (sched_setparam (pd->tid, &param) == -1, 0))
+  if (__glibc_unlikely (sched_setparam (pd->tid, &param) == -1))
     result = errno;
   else
     {
diff --git a/nptl/pthread_setspecific.c b/nptl/pthread_setspecific.c
index 877fb02200..0ace86168f 100644
--- a/nptl/pthread_setspecific.c
+++ b/nptl/pthread_setspecific.c
@@ -36,7 +36,7 @@ __pthread_setspecific (key, value)
 
   /* Special case access to the first 2nd-level block.  This is the
      usual case.  */
-  if (__builtin_expect (key < PTHREAD_KEY_2NDLEVEL_SIZE, 1))
+  if (__glibc_likely (key < PTHREAD_KEY_2NDLEVEL_SIZE))
     {
       /* Verify the key is sane.  */
       if (KEY_UNUSED ((seq = __pthread_keys[key].seq)))
diff --git a/nptl/sem_init.c b/nptl/sem_init.c
index 8bfb9c1f0c..7af2775113 100644
--- a/nptl/sem_init.c
+++ b/nptl/sem_init.c
@@ -31,7 +31,7 @@ __new_sem_init (sem, pshared, value)
      unsigned int value;
 {
   /* Parameter sanity check.  */
-  if (__builtin_expect (value > SEM_VALUE_MAX, 0))
+  if (__glibc_unlikely (value > SEM_VALUE_MAX))
     {
       __set_errno (EINVAL);
       return -1;
@@ -66,7 +66,7 @@ __old_sem_init (sem, pshared, value)
      unsigned int value;
 {
   /* Parameter sanity check.  */
-  if (__builtin_expect (value > SEM_VALUE_MAX, 0))
+  if (__glibc_unlikely (value > SEM_VALUE_MAX))
     {
       __set_errno (EINVAL);
       return -1;
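Note the error convention split visible in the sem_init hunks: sem_* functions report failure POSIX-style (set errno, return -1), while the pthread_* functions above return the error code directly. A hedged sketch of the sem_init-style check using only public API (hypothetical wrapper name):

    #include <errno.h>
    #include <limits.h>
    #include <semaphore.h>

    /* Validate VALUE the way __new_sem_init does; the check is the
       unlikely branch, and failure goes through errno, not the
       return value.  */
    static int
    checked_sem_init (sem_t *sem, unsigned int value)
    {
      if (value > SEM_VALUE_MAX)   /* __glibc_unlikely in the real code.  */
        {
          errno = EINVAL;
          return -1;
        }
      return sem_init (sem, 0, value);
    }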
diff --git a/nptl/sem_open.c b/nptl/sem_open.c
index b57e6b7573..cf91859dab 100644
--- a/nptl/sem_open.c
+++ b/nptl/sem_open.c
@@ -74,10 +74,10 @@ __where_is_shmfs (void)
 
   /* OK, do it the hard way.  Look through the /proc/mounts file and, if
      this does not exist, through /etc/fstab to find the mount point.  */
   fp = __setmntent ("/proc/mounts", "r");
-  if (__builtin_expect (fp == NULL, 0))
+  if (__glibc_unlikely (fp == NULL))
     {
       fp = __setmntent (_PATH_MNTTAB, "r");
-      if (__builtin_expect (fp == NULL, 0))
+      if (__glibc_unlikely (fp == NULL))
	/* There is nothing we can do.  Blind guesses are not helpful.  */
	return;
     }
diff --git a/nptl/sigaction.c b/nptl/sigaction.c
index 33cf9ade41..7b01bfcd9c 100644
--- a/nptl/sigaction.c
+++ b/nptl/sigaction.c
@@ -34,7 +34,7 @@ __sigaction (sig, act, oact)
      const struct sigaction *act;
      struct sigaction *oact;
 {
-  if (__builtin_expect (sig == SIGCANCEL || sig == SIGSETXID, 0))
+  if (__glibc_unlikely (sig == SIGCANCEL || sig == SIGSETXID))
     {
       __set_errno (EINVAL);
       return -1;
diff --git a/nptl/sockperf.c b/nptl/sockperf.c
index d29a6ee26a..740fb883f0 100644
--- a/nptl/sockperf.c
+++ b/nptl/sockperf.c
@@ -479,7 +479,7 @@ get_clockfreq (void)
     return result;
 
   fd = open ("/proc/cpuinfo", O_RDONLY);
-  if (__builtin_expect (fd != -1, 1))
+  if (__glibc_likely (fd != -1))
     {
       /* XXX AFAIK the /proc filesystem can generate "files" only up
	 to a size of 4096 bytes.  */
@@ -491,7 +491,7 @@ get_clockfreq (void)
 	{
 	  char *mhz = memmem (buf, n, "cpu MHz", 7);
 
-	  if (__builtin_expect (mhz != NULL, 1))
+	  if (__glibc_likely (mhz != NULL))
 	    {
 	      char *endp = buf + n;
 	      int seen_decpoint = 0;
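Both perf.c and sockperf.c carry the same get_clockfreq logic: open /proc/cpuinfo, find the "cpu MHz" line, parse the number, with the success branches marked likely because the file and the line are normally present. A simplified re-creation using stdio instead of the hand-rolled buffer scan in the benchmarks:

    #include <stdio.h>

    /* Parse the CPU frequency from /proc/cpuinfo; returns 0.0 on any
       failure.  The real code reads the whole file into a buffer and
       scans it with memmem; sscanf is used here for brevity.  */
    static double
    cpu_mhz (void)
    {
      double mhz = 0.0;
      char line[256];
      FILE *fp = fopen ("/proc/cpuinfo", "r");
      if (fp == NULL)
        return 0.0;              /* The annotated-likely case is success.  */
      while (fgets (line, sizeof line, fp) != NULL)
        if (sscanf (line, "cpu MHz : %lf", &mhz) == 1)
          break;
      fclose (fp);
      return mhz;
    }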
diff --git a/nptl/sysdeps/pthread/createthread.c b/nptl/sysdeps/pthread/createthread.c
index 2a9a723ddb..93f93eebd4 100644
--- a/nptl/sysdeps/pthread/createthread.c
+++ b/nptl/sysdeps/pthread/createthread.c
@@ -56,7 +56,7 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
   PREPARE_CREATE;
 #endif
 
-  if (__builtin_expect (stopped != 0, 0))
+  if (__glibc_unlikely (stopped != 0))
     /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
      until we tell it to.  */
@@ -75,7 +75,7 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
   int rc = ARCH_CLONE (fct, STACK_VARIABLES_ARGS, clone_flags,
		       pd, &pd->tid, TLS_VALUE, &pd->tid);
 
-  if (__builtin_expect (rc == -1, 0))
+  if (__glibc_unlikely (rc == -1))
     {
       atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second.  */
@@ -93,7 +93,7 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
     }
 
   /* Now we have the possibility to set scheduling parameters etc.  */
-  if (__builtin_expect (stopped != 0, 0))
+  if (__glibc_unlikely (stopped != 0))
     {
       INTERNAL_SYSCALL_DECL (err);
       int res = 0;
@@ -104,7 +104,7 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
	  res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
				  attr->cpusetsize, attr->cpuset);
 
-	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
+	  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res, err)))
	    {
	      /* The operation failed.  We have to kill the thread.  First
		 send it the cancellation signal.  */
@@ -129,7 +129,7 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
	  res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
				  pd->schedpolicy, &pd->schedparam);
 
-	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
+	  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res, err)))
	    goto err_out;
	}
     }
@@ -183,7 +183,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,
			   | CLONE_CHILD_CLEARTID | CLONE_SYSVSEM
			   | 0);
 
-  if (__builtin_expect (THREAD_GETMEM (THREAD_SELF, report_events), 0))
+  if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events)))
     {
       /* The parent thread is supposed to report events.  Check whether
	 the TD_CREATE event is needed, too.  */
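The "stopped" path in do_clone pre-locks a lock the new thread must acquire first, so it cannot run ahead until the parent has applied affinity and scheduling settings. The same idea with a plain mutex standing in for NPTL's lll_lock (illustrative names, not the real implementation):

    #include <pthread.h>

    static pthread_mutex_t start_gate = PTHREAD_MUTEX_INITIALIZER;

    /* The child blocks on the gate before doing anything else.  */
    static void *
    gated_thread (void *arg)
    {
      pthread_mutex_lock (&start_gate);   /* Held by the parent until setup is done.  */
      pthread_mutex_unlock (&start_gate);
      /* ... actual thread body ... */
      return arg;
    }

Parent side: lock start_gate, call pthread_create with gated_thread, apply the settings to the new thread, then unlock the gate to release it.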
diff --git a/nptl/sysdeps/pthread/setxid.h b/nptl/sysdeps/pthread/setxid.h
index 65a1ce7bf9..76c88e0feb 100644
--- a/nptl/sysdeps/pthread/setxid.h
+++ b/nptl/sysdeps/pthread/setxid.h
@@ -48,7 +48,7 @@
   ({								\
     extern __typeof (__nptl_setxid) __nptl_setxid __attribute__((weak));\
     int __result;						\
-    if (__builtin_expect (__nptl_setxid != NULL, 0))		\
+    if (__glibc_unlikely (__nptl_setxid != NULL))		\
      {								\
	struct xid_command __cmd;				\
	__cmd.syscall_no = __NR_##name;				\
diff --git a/nptl/sysdeps/pthread/timer_create.c b/nptl/sysdeps/pthread/timer_create.c
index 359a770c05..461d28e353 100644
--- a/nptl/sysdeps/pthread/timer_create.c
+++ b/nptl/sysdeps/pthread/timer_create.c
@@ -68,7 +68,7 @@ timer_create (clock_id, evp, timerid)
       pthread_mutex_lock (&__timer_mutex);
 
       newtimer = __timer_alloc ();
-      if (__builtin_expect (newtimer == NULL, 0))
+      if (__glibc_unlikely (newtimer == NULL))
	{
	  __set_errno (EAGAIN);
	  goto unlock_bail;
@@ -123,7 +123,7 @@ timer_create (clock_id, evp, timerid)
	      thread = __timer_thread_alloc (&newtimer->attr, clock_id);
 
	      /* Out of luck; no threads are available.  */
-	      if (__builtin_expect (thread == NULL, 0))
+	      if (__glibc_unlikely (thread == NULL))
		{
		  __set_errno (EAGAIN);
		  goto unlock_bail;
diff --git a/nptl/sysdeps/pthread/unwind-forcedunwind.c b/nptl/sysdeps/pthread/unwind-forcedunwind.c
index cb94ea6c21..9d7bdc5b90 100644
--- a/nptl/sysdeps/pthread/unwind-forcedunwind.c
+++ b/nptl/sysdeps/pthread/unwind-forcedunwind.c
@@ -42,7 +42,7 @@ pthread_cancel_init (void)
   void *getcfa;
   void *handle;
 
-  if (__builtin_expect (libgcc_s_handle != NULL, 1))
+  if (__glibc_likely (libgcc_s_handle != NULL))
     {
       /* Force gcc to reload all values.  */
       asm volatile ("" ::: "memory");
@@ -93,7 +93,7 @@ __unwind_freeres (void)
 void
 _Unwind_Resume (struct _Unwind_Exception *exc)
 {
-  if (__builtin_expect (libgcc_s_handle == NULL, 0))
+  if (__glibc_unlikely (libgcc_s_handle == NULL))
     pthread_cancel_init ();
   else
     atomic_read_barrier ();
@@ -109,7 +109,7 @@ __gcc_personality_v0 (int version, _Unwind_Action actions,
		      struct _Unwind_Exception *ue_header,
		      struct _Unwind_Context *context)
 {
-  if (__builtin_expect (libgcc_s_handle == NULL, 0))
+  if (__glibc_unlikely (libgcc_s_handle == NULL))
     pthread_cancel_init ();
   else
     atomic_read_barrier ();
@@ -125,7 +125,7 @@ _Unwind_Reason_Code
 _Unwind_ForcedUnwind (struct _Unwind_Exception *exc, _Unwind_Stop_Fn stop,
		      void *stop_argument)
 {
-  if (__builtin_expect (libgcc_s_handle == NULL, 0))
+  if (__glibc_unlikely (libgcc_s_handle == NULL))
     pthread_cancel_init ();
   else
     atomic_read_barrier ();
@@ -140,7 +140,7 @@ _Unwind_ForcedUnwind (struct _Unwind_Exception *exc, _Unwind_Stop_Fn stop,
 _Unwind_Word
 _Unwind_GetCFA (struct _Unwind_Context *context)
 {
-  if (__builtin_expect (libgcc_s_handle == NULL, 0))
+  if (__glibc_unlikely (libgcc_s_handle == NULL))
     pthread_cancel_init ();
   else
     atomic_read_barrier ();
diff --git a/nptl/sysdeps/unix/sysv/linux/getpid.c b/nptl/sysdeps/unix/sysv/linux/getpid.c
index f806f2ff87..937b1d4e11 100644
--- a/nptl/sysdeps/unix/sysv/linux/getpid.c
+++ b/nptl/sysdeps/unix/sysv/linux/getpid.c
@@ -27,10 +27,10 @@ static inline __attribute__((always_inline)) pid_t really_getpid (pid_t oldval);
 static inline __attribute__((always_inline)) pid_t
 really_getpid (pid_t oldval)
 {
-  if (__builtin_expect (oldval == 0, 1))
+  if (__glibc_likely (oldval == 0))
     {
       pid_t selftid = THREAD_GETMEM (THREAD_SELF, tid);
-      if (__builtin_expect (selftid != 0, 1))
+      if (__glibc_likely (selftid != 0))
	return selftid;
     }
@@ -53,7 +53,7 @@ __getpid (void)
   pid_t result = INTERNAL_SYSCALL (getpid, err, 0);
 #else
   pid_t result = THREAD_GETMEM (THREAD_SELF, pid);
-  if (__builtin_expect (result <= 0, 0))
+  if (__glibc_unlikely (result <= 0))
     result = really_getpid (result);
 #endif
   return result;
diff --git a/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c b/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c
index 5c9ada18a0..c894b1ecda 100644
--- a/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c
+++ b/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c
@@ -35,7 +35,7 @@ __lll_robust_lock_wait (int *futex, int private)
 
   do
     {
-      if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+      if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
	return oldval;
 
      int newval = oldval | FUTEX_WAITERS;
@@ -72,7 +72,7 @@ __lll_robust_timedlock_wait (int *futex, const struct timespec *abstime,
 
   /* Work around the fact that the kernel rejects negative timeout values
      despite them being valid.  */
-  if (__builtin_expect (abstime->tv_sec < 0, 0))
+  if (__glibc_unlikely (abstime->tv_sec < 0))
     return ETIMEDOUT;
 
   do
@@ -100,7 +100,7 @@ __lll_robust_timedlock_wait (int *futex, const struct timespec *abstime,
 #endif
 
      /* Wait.  */
-      if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+      if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
	return oldval;
 
      int newval = oldval | FUTEX_WAITERS;
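The getpid.c hunks show the cached-PID pattern: the TCB normally holds the PID, so the syscall is only the unlikely fallback (e.g. while fork/vfork has temporarily invalidated the cache). A sketch with a plain variable standing in for THREAD_GETMEM (THREAD_SELF, pid):

    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    static pid_t cached_pid;     /* 0 or negative: cache not usable.  */

    /* Return the cached PID; fall back to the syscall only when the
       cache is unusable -- the branch annotated unlikely above.  */
    static pid_t
    fast_getpid (void)
    {
      pid_t result = cached_pid;
      if (result <= 0)           /* __glibc_unlikely in the real code.  */
        result = (pid_t) syscall (SYS_getpid);
      return result;
    }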
diff --git a/nptl/sysdeps/unix/sysv/linux/mq_notify.c b/nptl/sysdeps/unix/sysv/linux/mq_notify.c
index 0d20919667..3138ad2c3c 100644
--- a/nptl/sysdeps/unix/sysv/linux/mq_notify.c
+++ b/nptl/sysdeps/unix/sysv/linux/mq_notify.c
@@ -266,7 +266,7 @@ mq_notify (mqd_t mqdes, const struct sigevent *notification)
 
       /* If we cannot create the netlink socket we cannot provide
	 SIGEV_THREAD support.  */
-      if (__builtin_expect (netlink_socket == -1, 0))
+      if (__glibc_unlikely (netlink_socket == -1))
	{
	  __set_errno (ENOSYS);
	  return -1;
@@ -299,7 +299,7 @@ mq_notify (mqd_t mqdes, const struct sigevent *notification)
   int retval = INLINE_SYSCALL (mq_notify, 2, mqdes, &se);
 
   /* If it failed, free the allocated memory.  */
-  if (__builtin_expect (retval != 0, 0))
+  if (__glibc_unlikely (retval != 0))
     free (data.attr);
 
   return retval;
diff --git a/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
index fe856708a4..ab92c3fc8f 100644
--- a/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
@@ -306,7 +306,7 @@ extern int __lll_robust_timedlock_wait
   ((void) ({						\
     int *__futex = &(lock);				\
     int __val = atomic_exchange_rel (__futex, 0);	\
-    if (__builtin_expect (__val > 1, 0))		\
+    if (__glibc_unlikely (__val > 1))			\
      lll_futex_wake (__futex, 1, private);		\
  }))
@@ -314,7 +314,7 @@ extern int __lll_robust_timedlock_wait
   ((void) ({						\
     int *__futex = &(lock);				\
     int __val = atomic_exchange_rel (__futex, 0);	\
-    if (__builtin_expect (__val & FUTEX_WAITERS, 0))	\
+    if (__glibc_unlikely (__val & FUTEX_WAITERS))	\
      lll_futex_wake (__futex, 1, private);		\
  }))
diff --git a/nptl/sysdeps/unix/sysv/linux/pt-raise.c b/nptl/sysdeps/unix/sysv/linux/pt-raise.c
index 68059db8db..74762b749b 100644
--- a/nptl/sysdeps/unix/sysv/linux/pt-raise.c
+++ b/nptl/sysdeps/unix/sysv/linux/pt-raise.c
@@ -31,7 +31,7 @@ raise (sig)
      fork function temporarily invalidated the PID field.  Adjust for
      that.  */
   pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
-  if (__builtin_expect (pid < 0, 0))
+  if (__glibc_unlikely (pid < 0))
     pid = -pid;
 
   return INLINE_SYSCALL (tgkill, 3, pid, THREAD_GETMEM (THREAD_SELF, tid),
diff --git a/nptl/sysdeps/unix/sysv/linux/pthread_kill.c b/nptl/sysdeps/unix/sysv/linux/pthread_kill.c
index 43e5c02b37..f8957ae17c 100644
--- a/nptl/sysdeps/unix/sysv/linux/pthread_kill.c
+++ b/nptl/sysdeps/unix/sysv/linux/pthread_kill.c
@@ -40,7 +40,7 @@ __pthread_kill (threadid, signo)
      if a thread exits between ESRCH test and tgkill, we might return
      EINVAL, because pd->tid would be cleared by the kernel.  */
   pid_t tid = atomic_forced_read (pd->tid);
-  if (__builtin_expect (tid <= 0, 0))
+  if (__glibc_unlikely (tid <= 0))
     /* Not a valid thread handle.  */
     return ESRCH;
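The atomic_forced_read in the pthread_kill and pthread_sigqueue hunks forces exactly one real load of pd->tid, so the <= 0 check and the later tgkill operate on the same snapshot. From memory, glibc's definition (in its internal atomic.h) is essentially an empty asm that pushes the value through a register; details may differ:

    /* Force a single load of X; the empty asm prevents the compiler
       from re-reading the memory location later.  */
    #define atomic_forced_read(x) \
      ({ __typeof (x) __x; __asm ("" : "=r" (__x) : "0" (x)); __x; })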
diff --git a/nptl/sysdeps/unix/sysv/linux/pthread_setaffinity.c b/nptl/sysdeps/unix/sysv/linux/pthread_setaffinity.c
index 288d73f8c8..874cf4b578 100644
--- a/nptl/sysdeps/unix/sysv/linux/pthread_setaffinity.c
+++ b/nptl/sysdeps/unix/sysv/linux/pthread_setaffinity.c
@@ -61,7 +61,7 @@ __pthread_setaffinity_new (pthread_t th, size_t cpusetsize,
   INTERNAL_SYSCALL_DECL (err);
   int res;
 
-  if (__builtin_expect (__kernel_cpumask_size == 0, 0))
+  if (__glibc_unlikely (__kernel_cpumask_size == 0))
     {
       res = __determine_cpumask_size (pd->tid);
       if (res != 0)
diff --git a/nptl/sysdeps/unix/sysv/linux/pthread_sigqueue.c b/nptl/sysdeps/unix/sysv/linux/pthread_sigqueue.c
index 9c3e73cdea..8dee3a69d2 100644
--- a/nptl/sysdeps/unix/sysv/linux/pthread_sigqueue.c
+++ b/nptl/sysdeps/unix/sysv/linux/pthread_sigqueue.c
@@ -44,7 +44,7 @@ pthread_sigqueue (threadid, signo, value)
      if a thread exits between ESRCH test and tgkill, we might return
      EINVAL, because pd->tid would be cleared by the kernel.  */
   pid_t tid = atomic_forced_read (pd->tid);
-  if (__builtin_expect (tid <= 0, 0))
+  if (__glibc_unlikely (tid <= 0))
     /* Not a valid thread handle.  */
     return ESRCH;
diff --git a/nptl/sysdeps/unix/sysv/linux/raise.c b/nptl/sysdeps/unix/sysv/linux/raise.c
index 321d9c3543..67c9b1f23e 100644
--- a/nptl/sysdeps/unix/sysv/linux/raise.c
+++ b/nptl/sysdeps/unix/sysv/linux/raise.c
@@ -50,7 +50,7 @@ raise (sig)
   /* raise is an async-safe function.  It could be called while the
      fork/vfork function temporarily invalidated the PID field.  Adjust for
      that.  */
-  if (__builtin_expect (pid <= 0, 0))
+  if (__glibc_unlikely (pid <= 0))
     pid = (pid & INT_MAX) == 0 ? selftid : -pid;
 
   return INLINE_SYSCALL (tgkill, 3, pid, selftid, sig);
diff --git a/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h
index 80dc90543b..864dcbccc0 100644
--- a/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h
@@ -233,7 +233,7 @@ static inline void
 __attribute__ ((always_inline))
 __lll_lock (int *futex, int private)
 {
-  if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 1, 0), 0))
+  if (__glibc_unlikely (atomic_compare_and_exchange_bool_acq (futex, 1, 0)))
     {
      if (__builtin_constant_p (private) && private == LLL_PRIVATE)
	__lll_lock_wait_private (futex);
@@ -260,7 +260,7 @@ static inline void
 __attribute__ ((always_inline))
 __lll_cond_lock (int *futex, int private)
 {
-  if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 2, 0), 0))
+  if (__glibc_unlikely (atomic_compare_and_exchange_bool_acq (futex, 2, 0)))
     __lll_lock_wait (futex, private);
 }
 #define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
@@ -278,7 +278,7 @@ __attribute__ ((always_inline))
 __lll_timedlock (int *futex, const struct timespec *abstime, int private)
 {
   int result = 0;
-  if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 1, 0), 0))
+  if (__glibc_unlikely (atomic_compare_and_exchange_bool_acq (futex, 1, 0)))
     result = __lll_timedlock_wait (futex, abstime, private);
   return result;
 }
@@ -307,7 +307,7 @@ __lll_robust_timedlock (int *futex, const struct timespec *abstime,
   int *__futexp = (futex);					\
								\
   lll_compare_and_swap (__futexp, __oldval, __newval, "slr %2,%2"); \
-  if (__builtin_expect (__oldval > 1, 0))			\
+  if (__glibc_unlikely (__oldval > 1))				\
    lll_futex_wake (__futexp, 1, private);			\
 })
 #define lll_unlock(futex, private) __lll_unlock(&(futex), private)
@@ -320,7 +320,7 @@ __lll_robust_timedlock (int *futex, const struct timespec *abstime,
   int *__futexp = (futex);					\
								\
   lll_compare_and_swap (__futexp, __oldval, __newval, "slr %2,%2"); \
-  if (__builtin_expect (__oldval & FUTEX_WAITERS, 0))		\
+  if (__glibc_unlikely (__oldval & FUTEX_WAITERS))		\
    lll_futex_wake (__futexp, 1, private);			\
 })
 #define lll_robust_unlock(futex, private) \
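The powerpc, s390, and sparc lowlevellock hunks all annotate the same unlock shape: release the lock word atomically, then make the futex-wake syscall only when the old value says a waiter exists (> 1, or FUTEX_WAITERS in the robust variants), which is the unlikely contended case. A sketch using GCC atomic builtins and a raw syscall rather than glibc's internal macros:

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Release *LOCK and wake one waiter if the old value indicates
       contention -- the branch annotated unlikely in these headers.  */
    static void
    futex_unlock (int *lock)
    {
      int old = __atomic_exchange_n (lock, 0, __ATOMIC_RELEASE);
      if (old > 1)               /* __glibc_unlikely in the real code.  */
        syscall (SYS_futex, lock, FUTEX_WAKE, 1, NULL, NULL, 0);
    }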
diff --git a/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
index d851d74542..d4ed7a9bd7 100644
--- a/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
@@ -223,7 +223,7 @@ __lll_lock (int *futex, int private)
 {
   int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
 
-  if (__builtin_expect (val != 0, 0))
+  if (__glibc_unlikely (val != 0))
     {
       if (__builtin_constant_p (private) && private == LLL_PRIVATE)
	__lll_lock_wait_private (futex);
@@ -251,7 +251,7 @@ __lll_cond_lock (int *futex, int private)
 {
   int val = atomic_compare_and_exchange_val_24_acq (futex, 2, 0);
 
-  if (__builtin_expect (val != 0, 0))
+  if (__glibc_unlikely (val != 0))
     __lll_lock_wait (futex, private);
 }
 #define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
@@ -272,7 +272,7 @@ __lll_timedlock (int *futex, const struct timespec *abstime, int private)
   int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
   int result = 0;
 
-  if (__builtin_expect (val != 0, 0))
+  if (__glibc_unlikely (val != 0))
     result = __lll_timedlock_wait (futex, abstime, private);
   return result;
 }
@@ -296,7 +296,7 @@ __lll_robust_timedlock (int *futex, const struct timespec *abstime,
   ((void) ({						\
     int *__futex = &(lock);				\
     int __val = atomic_exchange_24_rel (__futex, 0);	\
-    if (__builtin_expect (__val > 1, 0))		\
+    if (__glibc_unlikely (__val > 1))			\
      lll_futex_wake (__futex, 1, private);		\
  }))
@@ -304,7 +304,7 @@ __lll_robust_timedlock (int *futex, const struct timespec *abstime,
   ((void) ({						\
     int *__futex = &(lock);				\
     int __val = atomic_exchange_rel (__futex, 0);	\
-    if (__builtin_expect (__val & FUTEX_WAITERS, 0))	\
+    if (__glibc_unlikely (__val & FUTEX_WAITERS))	\
      lll_futex_wake (__futex, 1, private);		\
  }))
diff --git a/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c b/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c
index 4ce7871fbd..2221a27e22 100644
--- a/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c
+++ b/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c
@@ -33,7 +33,7 @@ pthread_barrier_destroy (barrier)
 
   lll_lock (ibarrier->b.lock, private);
 
-  if (__builtin_expect (ibarrier->b.left == ibarrier->b.init_count, 1))
+  if (__glibc_likely (ibarrier->b.left == ibarrier->b.init_count))
     /* The barrier is not used anymore.  */
     result = 0;
   else
diff --git a/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c b/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c
index 9bc7a05333..6af686361d 100644
--- a/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c
+++ b/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c
@@ -28,7 +28,7 @@ pthread_barrier_init (barrier, attr, count)
 {
   union sparc_pthread_barrier *ibarrier;
 
-  if (__builtin_expect (count == 0, 0))
+  if (__glibc_unlikely (count == 0))
     return EINVAL;
 
   struct pthread_barrierattr *iattr = (struct pthread_barrierattr *) attr;
diff --git a/nptl/sysdeps/unix/sysv/linux/sparc/sem_init.c b/nptl/sysdeps/unix/sysv/linux/sparc/sem_init.c
index d415366f4c..cbefdc4823 100644
--- a/nptl/sysdeps/unix/sysv/linux/sparc/sem_init.c
+++ b/nptl/sysdeps/unix/sysv/linux/sparc/sem_init.c
@@ -32,7 +32,7 @@ __new_sem_init (sem, pshared, value)
      unsigned int value;
 {
   /* Parameter sanity check.  */
-  if (__builtin_expect (value > SEM_VALUE_MAX, 0))
+  if (__glibc_unlikely (value > SEM_VALUE_MAX))
     {
       __set_errno (EINVAL);
       return -1;
@@ -66,7 +66,7 @@ __old_sem_init (sem, pshared, value)
      unsigned int value;
 {
   /* Parameter sanity check.  */
-  if (__builtin_expect (value > SEM_VALUE_MAX, 0))
+  if (__glibc_unlikely (value > SEM_VALUE_MAX))
     {
       __set_errno (EINVAL);
       return -1;
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/timer_create.c b/nptl/sysdeps/unix/sysv/linux/x86_64/timer_create.c
index e46eef426c..7624f2cdb2 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/timer_create.c
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/timer_create.c
@@ -50,7 +50,7 @@ __timer_create_old (clockid_t clock_id, struct sigevent *evp, int *timerid)
	break;
      }
 
-  if (__builtin_expect (i == OLD_TIMER_MAX, 0))
+  if (__glibc_unlikely (i == OLD_TIMER_MAX))
    {
      /* No free slot.  */
      (void) __timer_delete_new (newp);
diff --git a/nptl/unwind.c b/nptl/unwind.c
index fb2489cd96..a71015a2f1 100644
--- a/nptl/unwind.c
+++ b/nptl/unwind.c
@@ -62,7 +62,7 @@ unwind_stop (int version, _Unwind_Action actions,
			   adj))
     do_longjump = 1;
 
-  if (__builtin_expect (curp != NULL, 0))
+  if (__glibc_unlikely (curp != NULL))
    {
      /* Handle the compatibility stuff.  Execute all handlers
	 registered with the old method which would be unwound by this