author    | Torvald Riegel <triegel@redhat.com> | 2016-11-30 17:53:11 +0100
committer | Torvald Riegel <triegel@redhat.com> | 2016-12-05 16:19:43 +0100
commit    | ca6e601a9d4a72b3699cca15bad12ac1716bf49a (patch)
tree      | fd761ea31c43377d02f2a097f8030411163d6905 /sysdeps/unix/sysv/linux
parent    | 71be79a25f1d9efeafa5c634c4499281e8c313f2 (diff)
download  | glibc-ca6e601a9d4a72b3699cca15bad12ac1716bf49a.tar.gz
Use C11-like atomics instead of plain memory accesses in x86 lock elision.
This uses atomic operations to access lock elision metadata that is accessed
concurrently (i.e., the adapt_count fields). The data is smaller than a word
but is accessed only with atomic loads and stores; therefore, we also add
support for shorter-size atomic loads and stores.
* include/atomic.h (__atomic_check_size_ls): New.
(atomic_load_relaxed, atomic_load_acquire, atomic_store_relaxed,
atomic_store_release): Use it.
* sysdeps/x86/elide.h (ACCESS_ONCE): Remove.
(elision_adapt, ELIDE_LOCK): Use atomics.
* sysdeps/unix/sysv/linux/x86/elision-lock.c (__lll_lock_elision): Use
atomics and improve code comments.
* sysdeps/unix/sysv/linux/x86/elision-trylock.c
(__lll_trylock_elision): Likewise.
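
For illustration, the sketch below shows the same access pattern the patch adopts for adapt_count, written as a self-contained example using ISO C11 <stdatomic.h> rather than glibc's internal atomic_load_relaxed/atomic_store_relaxed macros; the function names and the SKIP_LOCK_BUSY value are invented for the example and are not part of the patch.

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical tuning value, standing in for aconf.skip_lock_busy.  */
    #define SKIP_LOCK_BUSY 3

    /* A short-sized hint that may be read and written concurrently.  All
       accesses use relaxed-MO atomics: this avoids a data race but imposes
       no ordering, which is fine because the value is only a hint.  */
    static _Atomic short adapt_count;

    bool
    try_elision (void)
    {
      /* Relaxed load: no ordering is needed for a hint.  */
      if (atomic_load_explicit (&adapt_count, memory_order_relaxed) <= 0)
        return true;   /* Worth attempting a transaction.  */

      /* Separate load and store instead of an atomic RMW: lost updates are
         possible but harmless for a hint, and no locked instruction is
         needed.  */
      atomic_store_explicit (&adapt_count,
                             atomic_load_explicit (&adapt_count,
                                                   memory_order_relaxed) - 1,
                             memory_order_relaxed);
      return false;
    }

    void
    note_busy_abort (void)
    {
      /* Skip the store if the hint already has the desired value, to avoid
         needlessly invalidating the cache line.  */
      if (atomic_load_explicit (&adapt_count, memory_order_relaxed)
          != SKIP_LOCK_BUSY)
        atomic_store_explicit (&adapt_count, SKIP_LOCK_BUSY,
                               memory_order_relaxed);
    }

Using a separate relaxed load and store instead of an atomic fetch-and-sub mirrors the patch's choice: for a pure hint, occasional lost updates are acceptable, and a locked read-modify-write instruction would only add cost.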
Diffstat (limited to 'sysdeps/unix/sysv/linux')
-rw-r--r-- | sysdeps/unix/sysv/linux/x86/elision-lock.c    | 28
-rw-r--r-- | sysdeps/unix/sysv/linux/x86/elision-trylock.c | 18
2 files changed, 31 insertions, 15 deletions
diff --git a/sysdeps/unix/sysv/linux/x86/elision-lock.c b/sysdeps/unix/sysv/linux/x86/elision-lock.c
index 5e66960123..c05ade4722 100644
--- a/sysdeps/unix/sysv/linux/x86/elision-lock.c
+++ b/sysdeps/unix/sysv/linux/x86/elision-lock.c
@@ -44,7 +44,13 @@ int
 __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
 {
-  if (*adapt_count <= 0)
+  /* adapt_count can be accessed concurrently; these accesses can be both
+     inside of transactions (if critical sections are nested and the outer
+     critical section uses lock elision) and outside of transactions.  Thus,
+     we need to use atomic accesses to avoid data races.  However, the
+     value of adapt_count is just a hint, so relaxed MO accesses are
+     sufficient.  */
+  if (atomic_load_relaxed (adapt_count) <= 0)
     {
       unsigned status;
       int try_xbegin;
 
@@ -70,15 +76,20 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
               && _XABORT_CODE (status) == _ABORT_LOCK_BUSY)
             {
               /* Right now we skip here.  Better would be to wait a bit
-                 and retry.  This likely needs some spinning.  */
-              if (*adapt_count != aconf.skip_lock_busy)
-                *adapt_count = aconf.skip_lock_busy;
+                 and retry.  This likely needs some spinning.  See
+                 above for why relaxed MO is sufficient.  */
+              if (atomic_load_relaxed (adapt_count)
+                  != aconf.skip_lock_busy)
+                atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
             }
           /* Internal abort.  There is no chance for retry.
              Use the normal locking and next time use lock.
-             Be careful to avoid writing to the lock.  */
-          else if (*adapt_count != aconf.skip_lock_internal_abort)
-            *adapt_count = aconf.skip_lock_internal_abort;
+             Be careful to avoid writing to the lock.  See above for why
+             relaxed MO is sufficient.  */
+          else if (atomic_load_relaxed (adapt_count)
+              != aconf.skip_lock_internal_abort)
+            atomic_store_relaxed (adapt_count,
+                aconf.skip_lock_internal_abort);
           break;
         }
     }
@@ -87,7 +98,8 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
     {
       /* Use a normal lock until the threshold counter runs out.
          Lost updates possible.  */
-      (*adapt_count)--;
+      atomic_store_relaxed (adapt_count,
+          atomic_load_relaxed (adapt_count) - 1);
     }
 
   /* Use a normal lock here.  */
diff --git a/sysdeps/unix/sysv/linux/x86/elision-trylock.c b/sysdeps/unix/sysv/linux/x86/elision-trylock.c
index 65d9c18584..2d44f50426 100644
--- a/sysdeps/unix/sysv/linux/x86/elision-trylock.c
+++ b/sysdeps/unix/sysv/linux/x86/elision-trylock.c
@@ -36,8 +36,10 @@ __lll_trylock_elision (int *futex, short *adapt_count)
      return an error.  */
   _xabort (_ABORT_NESTED_TRYLOCK);
 
-  /* Only try a transaction if it's worth it.  */
-  if (*adapt_count <= 0)
+  /* Only try a transaction if it's worth it.  See __lll_lock_elision for
+     why we need atomic accesses.  Relaxed MO is sufficient because this is
+     just a hint.  */
+  if (atomic_load_relaxed (adapt_count) <= 0)
     {
       unsigned status;
 
@@ -55,16 +57,18 @@ __lll_trylock_elision (int *futex, short *adapt_count)
       if (!(status & _XABORT_RETRY))
         {
           /* Internal abort.  No chance for retry.  For future
-             locks don't try speculation for some time.  */
-          if (*adapt_count != aconf.skip_trylock_internal_abort)
-            *adapt_count = aconf.skip_trylock_internal_abort;
+             locks don't try speculation for some time.  See above for MO.  */
+          if (atomic_load_relaxed (adapt_count)
+              != aconf.skip_lock_internal_abort)
+            atomic_store_relaxed (adapt_count, aconf.skip_lock_internal_abort);
         }
       /* Could do some retries here.  */
     }
   else
     {
-      /* Lost updates are possible, but harmless.  */
-      (*adapt_count)--;
+      /* Lost updates are possible but harmless (see above).  */
+      atomic_store_relaxed (adapt_count,
+          atomic_load_relaxed (adapt_count) - 1);
     }
 
   return lll_trylock (*futex);
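
The include/atomic.h part of this commit (the new __atomic_check_size_ls used by atomic_load_relaxed, atomic_load_acquire, atomic_store_relaxed and atomic_store_release) falls outside the sysdeps/unix/sysv/linux diff shown above. As a rough sketch of the idea only, and not glibc's actual macros: assuming the existing size check for read-modify-write operations rejects operands smaller than a word, a looser check for plain loads and stores could look like the following. The my_* names are invented, and GCC's __atomic builtins plus C11 _Static_assert are assumed.

    /* Accept the operand sizes for which plain atomic loads and stores are
       supported; read-modify-write operations would typically be restricted
       to word-sized (4- or 8-byte) operands only.  */
    #define my_atomic_check_size_ls(mem)                                      \
      _Static_assert (sizeof (*(mem)) == 1 || sizeof (*(mem)) == 2            \
                      || sizeof (*(mem)) == 4 || sizeof (*(mem)) == 8,        \
                      "unsupported size for atomic load/store")

    #define my_atomic_load_relaxed(mem)                                       \
      ({ my_atomic_check_size_ls (mem);                                       \
         __atomic_load_n ((mem), __ATOMIC_RELAXED); })

    #define my_atomic_store_relaxed(mem, val)                                 \
      do                                                                      \
        {                                                                     \
          my_atomic_check_size_ls (mem);                                      \
          __atomic_store_n ((mem), (val), __ATOMIC_RELAXED);                  \
        }                                                                     \
      while (0)

    /* Example: a 2-byte hint counter such as adapt_count can now go through
       the generic load/store wrappers.  */
    static short adapt_count;

    static inline void
    decrement_hint (void)
    {
      my_atomic_store_relaxed (&adapt_count,
                               my_atomic_load_relaxed (&adapt_count) - 1);
    }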