-rw-r--r--	storage/innobase/include/os0sync.h	| 11 ++---------
-rw-r--r--	storage/innobase/include/sync0sync.ic	|  5 +++++
-rw-r--r--	storage/xtradb/include/os0sync.h	| 11 ++---------
-rw-r--r--	storage/xtradb/include/sync0sync.ic	|  5 +++++
4 files changed, 14 insertions, 18 deletions
diff --git a/storage/innobase/include/os0sync.h b/storage/innobase/include/os0sync.h
index 213974b01de..af58a232746 100644
--- a/storage/innobase/include/os0sync.h
+++ b/storage/innobase/include/os0sync.h
@@ -348,20 +348,13 @@ os_atomic_test_and_set(volatile lock_word_t* ptr)
 }
 
 /** Do an atomic release.
-
-In theory __sync_lock_release should be used to release the lock.
-Unfortunately, it does not work properly alone. The workaround is
-that more conservative __sync_lock_test_and_set is used instead.
-
-Performance regression was observed at some conditions for Intel
-architecture. Disable release barrier on Intel architecture for now.
 @param[in,out]	ptr		Memory location to write to
 @return	the previous value */
 static inline
-lock_word_t
+void
 os_atomic_clear(volatile lock_word_t*	ptr)
 {
-	return(__sync_lock_test_and_set(ptr, 0));
+	__sync_lock_release(ptr);
 }
 
 # elif defined(HAVE_IB_GCC_ATOMIC_TEST_AND_SET)
diff --git a/storage/innobase/include/sync0sync.ic b/storage/innobase/include/sync0sync.ic
index 1120da8a3be..d0f266309fc 100644
--- a/storage/innobase/include/sync0sync.ic
+++ b/storage/innobase/include/sync0sync.ic
@@ -178,6 +178,11 @@ mutex_exit_func(
 	to wake up possible hanging threads if they are missed
 	in mutex_signal_object. */
 
+	/* We add a memory barrier to prevent reading of the
+	number of waiters before releasing the lock. */
+
+	os_mb;
+
 	if (mutex_get_waiters(mutex) != 0) {
 
 		mutex_signal_object(mutex);
diff --git a/storage/xtradb/include/os0sync.h b/storage/xtradb/include/os0sync.h
index b52c078fa54..08da9dff4e3 100644
--- a/storage/xtradb/include/os0sync.h
+++ b/storage/xtradb/include/os0sync.h
@@ -381,20 +381,13 @@ os_atomic_test_and_set(volatile lock_word_t* ptr)
 }
 
 /** Do an atomic release.
-
-In theory __sync_lock_release should be used to release the lock.
-Unfortunately, it does not work properly alone. The workaround is
-that more conservative __sync_lock_test_and_set is used instead.
-
-Performance regression was observed at some conditions for Intel
-architecture. Disable release barrier on Intel architecture for now.
 @param[in,out]	ptr		Memory location to write to
 @return	the previous value */
 static inline
-lock_word_t
+void
 os_atomic_clear(volatile lock_word_t*	ptr)
 {
-	return(__sync_lock_test_and_set(ptr, 0));
+	__sync_lock_release(ptr);
 }
 
 # elif defined(HAVE_IB_GCC_ATOMIC_TEST_AND_SET)
diff --git a/storage/xtradb/include/sync0sync.ic b/storage/xtradb/include/sync0sync.ic
index 48039c854d9..c733becf6df 100644
--- a/storage/xtradb/include/sync0sync.ic
+++ b/storage/xtradb/include/sync0sync.ic
@@ -178,6 +178,11 @@ mutex_exit_func(
 	to wake up possible hanging threads if they are missed
 	in mutex_signal_object. */
 
+	/* We add a memory barrier to prevent reading of the
+	number of waiters before releasing the lock. */
+
+	os_mb;
+
 	if (mutex_get_waiters(mutex) != 0) {
 
 		mutex_signal_object(mutex);
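
For context, the following is an illustrative sketch only, not part of the patch: a minimal test-and-set lock built on the same GCC builtins the diff touches. It shows the pattern the change moves to, where __sync_lock_release() clears the lock word with release semantics and a full barrier keeps the later read of the waiter count from being reordered before the release, which is the role os_mb plays in mutex_exit_func(). The names my_lock_t, my_lock_try() and my_lock_exit() are hypothetical, and os_mb is assumed to expand to __sync_synchronize() as it does on GCC-based InnoDB builds.

/* Illustrative sketch only; not from the patch. */

#include <stdio.h>

typedef struct {
	volatile long	word;		/* 0 = free, 1 = held */
	volatile long	waiters;	/* threads waiting to be woken */
} my_lock_t;

static inline int
my_lock_try(my_lock_t* lock)
{
	/* Acquire: atomically set the word; an old value of 0 means we got it. */
	return(__sync_lock_test_and_set(&lock->word, 1) == 0);
}

static inline void
my_lock_exit(my_lock_t* lock)
{
	/* Release the lock word with release semantics, as the patched
	os_atomic_clear() now does. */
	__sync_lock_release(&lock->word);

	/* Full barrier so the read of waiters below cannot be reordered
	before the release above; the equivalent of os_mb in
	mutex_exit_func(). */
	__sync_synchronize();

	if (lock->waiters != 0) {
		/* Wake up sleeping threads here (event signalling omitted). */
		printf("signal waiters\n");
	}
}

int
main(void)
{
	my_lock_t	lock = { 0, 0 };

	if (my_lock_try(&lock)) {
		my_lock_exit(&lock);
	}

	return(0);
}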